From 50ea05efaf3bed7dd34bcc2635a8b3f53bd0ccc1 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 30 Dec 2015 16:06:04 +0200 Subject: mac80211: pass block ack session timeout to to driver Currently mac80211 does not inform the driver of the session block ack timeout when starting a rx aggregation session. Drivers that manage the reorder buffer need to know this parameter. Seeing that there are now too many arguments for the drv_ampdu_action() function, wrap them inside a structure. Signed-off-by: Sara Sharon Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/ath10k/mac.c | 7 ++++--- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 7 ++++--- drivers/net/wireless/ath/ath9k/main.c | 8 +++++--- drivers/net/wireless/ath/carl9170/main.c | 8 +++++--- drivers/net/wireless/ath/wcn36xx/main.c | 8 +++++--- .../broadcom/brcm80211/brcmsmac/mac80211_if.c | 8 +++++--- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 8 +++++--- drivers/net/wireless/intel/iwlegacy/4965.h | 4 +--- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 9 ++++++--- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 9 ++++++--- drivers/net/wireless/mac80211_hwsim.c | 8 +++++--- drivers/net/wireless/marvell/mwl8k.c | 10 ++++++---- drivers/net/wireless/mediatek/mt7601u/main.c | 8 +++++--- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 7 ++++--- drivers/net/wireless/ralink/rt2x00/rt2800lib.h | 4 +--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++--- drivers/net/wireless/realtek/rtlwifi/core.c | 8 +++++--- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 19 +++++++------------ drivers/net/wireless/st/cw1200/sta.c | 4 +--- drivers/net/wireless/st/cw1200/sta.h | 4 +--- drivers/net/wireless/ti/wlcore/main.c | 8 +++++--- 21 files changed, 89 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6146a293601a..368de5e5a04f 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -6366,12 +6366,13 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static int ath10k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct ath10k *ar = hw->priv; struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", arvif->vdev_id, sta->addr, tid, action); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index fe1fd1a5ae15..639294a9e34d 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -1657,13 +1657,14 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw, static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, - u16 tid, u16 *ssn, u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct ath9k_htc_priv *priv = hw->priv; struct ath9k_htc_sta *ista; int ret = 0; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; mutex_lock(&priv->mutex); ath9k_htc_ps_wakeup(priv); diff --git 
a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index c1b33fdcca08..cf58a304e9f0 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1864,14 +1864,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) static int ath9k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, - u16 tid, u16 *ssn, u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct ath_softc *sc = hw->priv; struct ath_common *common = ath9k_hw_common(sc->sc_ah); bool flush = false; int ret = 0; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; mutex_lock(&sc->mutex); diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 19d3d64416bf..4d1527a2e292 100644 --- a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -1413,10 +1413,12 @@ static void carl9170_ampdu_work(struct work_struct *work) static int carl9170_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, - u16 tid, u16 *ssn, u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; struct ar9170 *ar = hw->priv; struct carl9170_sta_info *sta_info = (void *) sta->drv_priv; struct carl9170_sta_tid *tid_info; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 7c169abdbafe..a27279c2c695 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -857,12 +857,14 @@ static int wcn36xx_resume(struct ieee80211_hw *hw) static int wcn36xx_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct wcn36xx *wcn = hw->priv; struct wcn36xx_sta *sta_priv = NULL; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n", action, tid); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index bec2dc1ca2e4..61ae2768132a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -818,13 +818,15 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, static int brcms_ops_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct brcms_info *wl = hw->priv; struct scb *scb = &wl->wlc->pri_scb; int status; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u8 buf_size = params->buf_size; if (WARN_ON(scb->magic != SCB_MAGIC)) return -EIDRM; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c 
b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index fd38aa0763e4..b75f4ef3cdc7 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 * ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct il_priv *il = hw->priv; int ret = -EINVAL; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid); diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h index 8ab8706f9422..e432715e02d8 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.h +++ b/drivers/net/wireless/intel/iwlegacy/4965.h @@ -182,9 +182,7 @@ void il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_sta *sta, u32 iv32, u16 *phase1key); int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 * ssn, - u8 buf_size, bool amsdu); + struct ieee80211_ampdu_params *params); int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); void diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 29ea1c6705b4..151721e4040c 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -732,12 +732,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg) static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int ret = -EINVAL; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; + u8 buf_size = params->buf_size; struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d70a1716f3e0..1bd3f0b700d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -837,13 +837,16 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, - u16 *ssn, u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int ret; bool tx_agg_ref = false; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; + u8 buf_size = params->buf_size; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 
c32889a1e39c..e31a94fd6135 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1845,10 +1845,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + switch (action) { case IEEE80211_AMPDU_TX_START: ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index 30e3aaae32e2..088429d0a634 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -5421,11 +5421,13 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx, static int mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { - + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; + u8 buf_size = params->buf_size; int i, rc = 0; struct mwl8k_priv *priv = hw->priv; struct mwl8k_ampdu_stream *stream; diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index f715eee39851..e70dd9523911 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -334,11 +334,13 @@ static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value) static int mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, - bool amsdu) + struct ieee80211_ampdu_params *params) { struct mt7601u_dev *dev = hw->priv; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; WARN_ON(msta->wcid.idx > GROUP_WCID(0)); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 9733b31a780d..69c1c09687a3 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -7935,10 +7935,11 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) EXPORT_SYMBOL_GPL(rt2800_get_tsf); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv; int ret = 0; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 440790b92b19..83f1a44fb9b4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -218,9 +218,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw, const struct 
ieee80211_tx_queue_params *params); u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu); + struct ieee80211_ampdu_params *params); int rt2800_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey); void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6aed923a709a..7d820c395375 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5375,13 +5375,13 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, static int rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size, - bool amsdu) + struct ieee80211_ampdu_params *params) { struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; u8 ampdu_factor, ampdu_density; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; switch (action) { case IEEE80211_AMPDU_TX_START: diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4ae421ef30d9..f2507610314b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -1371,11 +1371,13 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw, static int rtl_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; switch (action) { case IEEE80211_AMPDU_TX_START: diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index b5bcc933a2a6..4df992de7d07 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -659,29 +659,24 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw, * informs the f/w regarding this. * @hw: Pointer to the ieee80211_hw structure. * @vif: Pointer to the ieee80211_vif structure. - * @action: ieee80211_ampdu_mlme_action enum. - * @sta: Pointer to the ieee80211_sta structure. - * @tid: Traffic identifier. - * @ssn: Pointer to ssn value. - * @buf_size: Buffer size (for kernel version > 2.6.38). - * @amsdu: is AMSDU in AMPDU allowed + * @params: Pointer to A-MPDU action parameters * * Return: status: 0 on success, negative error code on failure. 
*/ static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, - unsigned short tid, - unsigned short *ssn, - unsigned char buf_size, - bool amsdu) + struct ieee80211_ampdu_params *params) { int status = -EOPNOTSUPP; struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; u16 seq_no = 0; u8 ii = 0; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; + u8 buf_size = params->buf_size; for (ii = 0; ii < RSI_MAX_VIFS; ii++) { if (vif == adapter->vifs[ii]) diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index 06321c799c90..d0ddcde6c695 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -2129,9 +2129,7 @@ void cw1200_mcast_timeout(unsigned long arg) int cw1200_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { /* Aggregation is implemented fully in firmware, * including block ack negotiation. Do not allow diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h index bebb3379017f..a0bacaa39b31 100644 --- a/drivers/net/wireless/st/cw1200/sta.h +++ b/drivers/net/wireless/st/cw1200/sta.h @@ -109,9 +109,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev, u32 changed); int cw1200_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu); + struct ieee80211_ampdu_params *params); void cw1200_suspend_resume(struct cw1200_common *priv, struct wsm_suspend_resume *arg); diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index d1109c4f0f0d..45662cf3169f 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5187,14 +5187,16 @@ out: static int wl1271_op_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - enum ieee80211_ampdu_mlme_action action, - struct ieee80211_sta *sta, u16 tid, u16 *ssn, - u8 buf_size, bool amsdu) + struct ieee80211_ampdu_params *params) { struct wl1271 *wl = hw->priv; struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); int ret; u8 hlid, *ba_bitmap; + struct ieee80211_sta *sta = params->sta; + enum ieee80211_ampdu_mlme_action action = params->action; + u16 tid = params->tid; + u16 *ssn = ¶ms->ssn; wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action, tid); -- cgit v1.2.3 From d742c969b3ad9c83ac57579dfd56c90dd437db37 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Wed, 13 Jan 2016 14:52:52 +0100 Subject: ath10k: prevent txrx running for too long On multicore systems it was possible for the txrx tasklet to keep on running for long periods of time on a single CPU due to tx completion processing. Another CPU could feed the running tasklet for an indefinite amount of time. The tasklet is now guaranteed to run a finite amount of time and is limited by HTT CE Rx ring depth. This improves behavior when RPS is used on target system and might improve TCP handling as well. 
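A minimal sketch of the splice-and-drain pattern this message describes (the function name is illustrative; the actual ath10k hunk appears in the diff below): grab the shared completion queue's contents in one locked splice, then process only that local snapshot, so CPUs feeding the queue cannot extend the current tasklet pass indefinitely.

#include <linux/skbuff.h>
#include <linux/spinlock.h>

static void drain_tx_completions(struct sk_buff_head *tx_compl_q)
{
        struct sk_buff_head tx_q;
        struct sk_buff *skb;
        unsigned long flags;

        __skb_queue_head_init(&tx_q);

        /* Take a snapshot of the shared queue under its lock */
        spin_lock_irqsave(&tx_compl_q->lock, flags);
        skb_queue_splice_init(tx_compl_q, &tx_q);
        spin_unlock_irqrestore(&tx_compl_q->lock, flags);

        /* Drain only the snapshot; new completions wait for the next pass */
        while ((skb = __skb_dequeue(&tx_q))) {
                /* per-skb completion handling would go here */
                dev_kfree_skb_any(skb);
        }
}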
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 91afa3ae414c..3079434b5d9b 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2156,10 +2156,18 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) { struct ath10k_htt *htt = (struct ath10k_htt *)ptr; struct ath10k *ar = htt->ar; + struct sk_buff_head tx_q; struct htt_resp *resp; struct sk_buff *skb; + unsigned long flags; - while ((skb = skb_dequeue(&htt->tx_compl_q))) { + __skb_queue_head_init(&tx_q); + + spin_lock_irqsave(&htt->tx_compl_q.lock, flags); + skb_queue_splice_init(&htt->tx_compl_q, &tx_q); + spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags); + + while ((skb = __skb_dequeue(&tx_q))) { ath10k_htt_rx_frm_tx_compl(htt->ar, skb); dev_kfree_skb_any(skb); } -- cgit v1.2.3 From de46c015c9b703bb8abb91856ed45e3639c7e7d2 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:28 +0530 Subject: ath10k: add support for parsing per STA rx_duration for 10.2.4 Add support for parsing 'peer_rx_duration' as part of 10.2.4 peer_stats. Also register PEER_STATS service for parsing 'rx_duration' (and for any new fields to be added as part of peer_stats). Have checks for backward compatibility with older 10.2.4 firmware incase PEER_STATS service is not enabled Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/wmi.c | 22 ++++++++++++++++++++-- drivers/net/wireless/ath/ath10k/wmi.h | 12 ++++++++++++ 3 files changed, 33 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 7840cf3ef7a6..d47a08568bdc 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -159,6 +159,7 @@ struct ath10k_fw_stats_peer { u32 peer_rssi; u32 peer_tx_rate; u32 peer_rx_rate; /* 10x only */ + u32 rx_duration; }; struct ath10k_fw_stats_vdev { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index a7c3d299639b..c610d1761940 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -2862,11 +2862,20 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, /* fw doesn't implement vdev stats */ for (i = 0; i < num_peer_stats; i++) { - const struct wmi_10_2_4_peer_stats *src; + const struct wmi_10_2_4_ext_peer_stats *src; struct ath10k_fw_stats_peer *dst; + int stats_len; + bool ext_peer_stats_support; + + ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS, + ar->wmi.svc_map); + if (ext_peer_stats_support) + stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats); + else + stats_len = sizeof(struct wmi_10_2_4_peer_stats); src = (void *)skb->data; - if (!skb_pull(skb, sizeof(*src))) + if (!skb_pull(skb, stats_len)) return -EPROTO; dst = kzalloc(sizeof(*dst), GFP_ATOMIC); @@ -2876,6 +2885,9 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar, ath10k_wmi_pull_peer_stats(&src->common.old, dst); dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate); + + if (ext_peer_stats_support) + dst->rx_duration = __le32_to_cpu(src->rx_duration); /* FIXME: expose 10.2 specific values */ list_add_tail(&dst->list, 
&stats->peers); @@ -5517,6 +5529,9 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map)) features |= WMI_10_2_COEX_GPIO; + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) + features |= WMI_10_2_PEER_STATS; + cmd->resource_config.feature_mask = __cpu_to_le32(features); memcpy(&cmd->resource_config.common, &config, sizeof(config)); @@ -7126,6 +7141,9 @@ ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer, "Peer TX rate", peer->peer_tx_rate); len += scnprintf(buf + len, buf_len - len, "%30s %u\n", "Peer RX rate", peer->peer_rx_rate); + len += scnprintf(buf + len, buf_len - len, "%30s %u\n", + "Peer RX duration", peer->rx_duration); + len += scnprintf(buf + len, buf_len - len, "\n"); *length = len; } diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index d85ad7855d20..e1bb02ba207d 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -177,6 +177,7 @@ enum wmi_service { WMI_SERVICE_BSS_CHANNEL_INFO_64, WMI_SERVICE_EXT_RES_CFG_SUPPORT, WMI_SERVICE_MESH, + WMI_SERVICE_PEER_STATS, /* keep last */ WMI_SERVICE_MAX, @@ -213,6 +214,7 @@ enum wmi_10x_service { WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, WMI_10X_SERVICE_MESH, WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, + WMI_10X_SERVICE_PEER_STATS, }; enum wmi_main_service { @@ -386,6 +388,7 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT); SVCSTR(WMI_SERVICE_MESH); + SVCSTR(WMI_SERVICE_PEER_STATS); default: return NULL; } @@ -463,6 +466,8 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_MESH, len); SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); + SVCMAP(WMI_10X_SERVICE_PEER_STATS, + WMI_SERVICE_PEER_STATS, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, @@ -2417,6 +2422,7 @@ enum wmi_10_2_feature_mask { WMI_10_2_RX_BATCH_MODE = BIT(0), WMI_10_2_ATF_CONFIG = BIT(1), WMI_10_2_COEX_GPIO = BIT(3), + WMI_10_2_PEER_STATS = BIT(7), }; struct wmi_resource_config_10_2 { @@ -4230,6 +4236,12 @@ struct wmi_10_2_4_peer_stats { __le32 unknown_value; /* FIXME: what is this word? */ } __packed; +struct wmi_10_2_4_ext_peer_stats { + struct wmi_10_2_peer_stats common; + __le32 peer_rssi_changed; + __le32 rx_duration; +} __packed; + struct wmi_10_4_peer_stats { struct wmi_mac_addr peer_macaddr; __le32 peer_rssi; -- cgit v1.2.3 From 774e656e94cdd8db27a8831abb3d35fbb95d2bc6 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:29 +0530 Subject: ath10k: fix naming Peer stats rssi_changed field in 10.2.4 Fix naming of peer stats rssi_changed field in 10.2.4 to make it more readable. As of now this field is not used, but necessary to pull in fw_stats with appropriate length. Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index e1bb02ba207d..187f495c1fbe 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4233,7 +4233,7 @@ struct wmi_10_2_peer_stats { struct wmi_10_2_4_peer_stats { struct wmi_10_2_peer_stats common; - __le32 unknown_value; /* FIXME: what is this word? 
*/ + __le32 peer_rssi_changed; } __packed; struct wmi_10_2_4_ext_peer_stats { -- cgit v1.2.3 From 1fe374f5b07ed58e485fdbfa7410876e25ce1a78 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:30 +0530 Subject: ath10k: cleanup setting pdev paramaters Replace the local variable 'burst_enable' with 'param' for mapping and setting pdev paraemeters and with this patch pretty easy to extend support for new parameters adhering to linux kernel coding guidelines Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 6146a293601a..0d7671c778fd 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -3988,7 +3988,7 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) static int ath10k_start(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; - u32 burst_enable; + u32 param; int ret = 0; /* @@ -4031,13 +4031,15 @@ static int ath10k_start(struct ieee80211_hw *hw) goto err_power_down; } - ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); + param = ar->wmi.pdev_param->pmf_qos; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); if (ret) { ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); goto err_core_stop; } - ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); + param = ar->wmi.pdev_param->dynamic_bw; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); if (ret) { ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); goto err_core_stop; @@ -4053,8 +4055,8 @@ static int ath10k_start(struct ieee80211_hw *hw) } if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { - burst_enable = ar->wmi.pdev_param->burst_enable; - ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0); + param = ar->wmi.pdev_param->burst_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, 0); if (ret) { ath10k_warn(ar, "failed to disable burst: %d\n", ret); goto err_core_stop; @@ -4072,8 +4074,8 @@ static int ath10k_start(struct ieee80211_hw *hw) * this problem. 
*/ - ret = ath10k_wmi_pdev_set_param(ar, - ar->wmi.pdev_param->arp_ac_override, 0); + param = ar->wmi.pdev_param->arp_ac_override; + ret = ath10k_wmi_pdev_set_param(ar, param, 0); if (ret) { ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", ret); @@ -4092,8 +4094,8 @@ static int ath10k_start(struct ieee80211_hw *hw) } } - ret = ath10k_wmi_pdev_set_param(ar, - ar->wmi.pdev_param->ani_enable, 1); + param = ar->wmi.pdev_param->ani_enable; + ret = ath10k_wmi_pdev_set_param(ar, param, 1); if (ret) { ath10k_warn(ar, "failed to enable ani by default: %d\n", ret); -- cgit v1.2.3 From b4619ea2f72153df42d8ba2894b2af5e621d6df6 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:31 +0530 Subject: ath10k: rename few function names of firmware stats Prerequisite patch to address checkpatch errors for the next patch in this series, this function names are bit too long, make it short Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 2bdf5408b0d9..4fb1e0eb3408 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -276,7 +276,7 @@ static const struct file_operations fops_wmi_services = { .llseek = default_llseek, }; -static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) +static void ath10k_fw_stats_pdevs_free(struct list_head *head) { struct ath10k_fw_stats_pdev *i, *tmp; @@ -286,7 +286,7 @@ static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head) } } -static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head) +static void ath10k_fw_stats_vdevs_free(struct list_head *head) { struct ath10k_fw_stats_vdev *i, *tmp; @@ -296,7 +296,7 @@ static void ath10k_debug_fw_stats_vdevs_free(struct list_head *head) } } -static void ath10k_debug_fw_stats_peers_free(struct list_head *head) +static void ath10k_fw_stats_peers_free(struct list_head *head) { struct ath10k_fw_stats_peer *i, *tmp; @@ -310,9 +310,9 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar) { spin_lock_bh(&ar->data_lock); ar->debug.fw_stats_done = false; - ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); - ath10k_debug_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); - ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers); + ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs); + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); spin_unlock_bh(&ar->data_lock); } @@ -391,9 +391,9 @@ free: /* In some cases lists have been spliced and cleared. Free up * resources if that is not the case. */ - ath10k_debug_fw_stats_pdevs_free(&stats.pdevs); - ath10k_debug_fw_stats_vdevs_free(&stats.vdevs); - ath10k_debug_fw_stats_peers_free(&stats.peers); + ath10k_fw_stats_pdevs_free(&stats.pdevs); + ath10k_fw_stats_vdevs_free(&stats.vdevs); + ath10k_fw_stats_peers_free(&stats.peers); spin_unlock_bh(&ar->data_lock); } -- cgit v1.2.3 From d57e7f2e7e042a7cef19702abe42f1e9b55d754c Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:32 +0530 Subject: ath10k: provision to support periodic peer stats update Enable support for periodic peer stats update when peer stats service is enabled. 
The host to update the peer stats received from the firmware periodically, since firmware will reset this to zero after sometime (due to memory constraints) While enabling periodic peer / vdev stats cleanup the existing list in debugfs if max limit is reached, so that the new stats is updated. Ideally speaking based on 'Michal Kazior's' suggestion we need to completely move to periodic update of all the stats making the 'ping - pong' method obselete. This requires a bit of re-work and some testing as well, also confirmation regarding backward comptability for various firmware and chipsets. Hence allow periodic update only for peer_stats. Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 4fb1e0eb3408..3b8da817972b 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -347,7 +347,8 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * delivered which is treated as end-of-data and is itself discarded */ - if (ar->debug.fw_stats_done) { + if (ar->debug.fw_stats_done && + !test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { ath10k_warn(ar, "received unsolicited stats update event\n"); goto free; } @@ -372,11 +373,13 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) /* Although this is unlikely impose a sane limit to * prevent firmware from DoS-ing the host. */ + ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers); ath10k_warn(ar, "dropping fw peer stats\n"); goto free; } if (num_vdevs >= BITS_PER_LONG) { + ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs); ath10k_warn(ar, "dropping fw vdev stats\n"); goto free; } -- cgit v1.2.3 From 8351c052194b30f852db6225a568e48ee2a4947b Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:33 +0530 Subject: ath10k: enable periodic peer stats update Register for 500ms as periodic peer stats update period, and parameters like rx_duration that needs to be tracked in host can be achieved by this provision, also periodic stats update is the future of fw_stats and shall be extended for pdev / vdev stats irrespecitive PEER_STATS service is enabled or not Signed-off-by: Mohammed Shafi Shajakhan [kvalo@qca.qualcomm.com: fix a checkpatch warning] Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 0d7671c778fd..ef0438d2cc8f 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4104,6 +4104,18 @@ static int ath10k_start(struct ieee80211_hw *hw) ar->ani_enabled = true; + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + param = ar->wmi.pdev_param->peer_stats_update_period; + ret = ath10k_wmi_pdev_set_param(ar, param, + PEER_DEFAULT_STATS_UPDATE_PERIOD); + if (ret) { + ath10k_warn(ar, + "failed to set peer stats period : %d\n", + ret); + goto err_core_stop; + } + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar); -- cgit v1.2.3 From 856e7c3084399fb7e029628dbe43dcb9bb7d7b5b Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 13 Jan 2016 21:16:34 +0530 Subject: ath10k: add debugfs support for Per STA total rx duration Add debugfs support for 
per client total rx duration, track this via the report of Peer stats rx duration reported for every 500ms Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.h | 1 + drivers/net/wireless/ath/ath10k/debug.c | 9 ++++-- drivers/net/wireless/ath/ath10k/debug.h | 6 ++++ drivers/net/wireless/ath/ath10k/debugfs_sta.c | 41 +++++++++++++++++++++++++++ 4 files changed, 54 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index d47a08568bdc..921b86a4f257 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -316,6 +316,7 @@ struct ath10k_sta { #ifdef CONFIG_MAC80211_DEBUGFS /* protected by conf_mutex */ bool aggr_mode; + u64 rx_duration; #endif }; diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3b8da817972b..8d4148a96af8 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -319,7 +319,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar) void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) { struct ath10k_fw_stats stats = {}; - bool is_start, is_started, is_end; + bool is_start, is_started, is_end, peer_stats_svc; size_t num_peers; size_t num_vdevs; int ret; @@ -347,8 +347,8 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) * delivered which is treated as end-of-data and is itself discarded */ - if (ar->debug.fw_stats_done && - !test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map); + if (ar->debug.fw_stats_done && !peer_stats_svc) { ath10k_warn(ar, "received unsolicited stats update event\n"); goto free; } @@ -384,6 +384,9 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) goto free; } + if (peer_stats_svc) + ath10k_sta_update_rx_duration(ar, &stats.peers); + list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); } diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index 814719cf4f22..f273478e2afb 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -153,6 +153,12 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) #ifdef CONFIG_MAC80211_DEBUGFS void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir); +void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *peer); +#else +static inline void ath10k_sta_update_rx_duration(struct ath10k *ar, + struct list_head *peer) +{ +} #endif /* CONFIG_MAC80211_DEBUGFS */ #ifdef CONFIG_ATH10K_DEBUG diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index 95b5c49374e0..67ef75b60567 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -18,6 +18,23 @@ #include "wmi-ops.h" #include "debug.h" +void ath10k_sta_update_rx_duration(struct ath10k *ar, struct list_head *head) +{ struct ieee80211_sta *sta; + struct ath10k_fw_stats_peer *peer; + struct ath10k_sta *arsta; + + rcu_read_lock(); + list_for_each_entry(peer, head, list) { + sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr, + NULL); + if (!sta) + continue; + arsta = (struct ath10k_sta 
*)sta->drv_priv; + arsta->rx_duration += (u64)peer->rx_duration; + } + rcu_read_unlock(); +} + static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -232,6 +249,28 @@ static const struct file_operations fops_delba = { .llseek = default_llseek, }; +static ssize_t ath10k_dbg_sta_read_rx_duration(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_sta *sta = file->private_data; + struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + char buf[100]; + int len = 0; + + len = scnprintf(buf, sizeof(buf), + "%llu usecs\n", arsta->rx_duration); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + +static const struct file_operations fops_rx_duration = { + .read = ath10k_dbg_sta_read_rx_duration, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct dentry *dir) { @@ -240,4 +279,6 @@ void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif, debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba); debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp); debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba); + debugfs_create_file("rx_duration", S_IRUGO, dir, sta, + &fops_rx_duration); } -- cgit v1.2.3 From 8b019fb0d0e75e2f682f17742b99c0ca970ca024 Mon Sep 17 00:00:00 2001 From: Yanbo Li Date: Thu, 14 Jan 2016 15:39:19 -0800 Subject: ath10k: remove the p2p notice of absence info from 10.2.4 FW beacon info The p2p NOA never been supported at 10.2.4 FW, remove it to avoid SWBA event parse error for multi beacon interval case. Signed-off-by: Yanbo Li Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.c | 46 ++++++++++++++++++++++++++++++++++- drivers/net/wireless/ath/ath10k/wmi.h | 10 ++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index c610d1761940..0f01a8d99604 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3256,6 +3256,50 @@ static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, return 0; } +static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar, + struct sk_buff *skb, + struct wmi_swba_ev_arg *arg) +{ + struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data; + u32 map; + size_t i; + + if (skb->len < sizeof(*ev)) + return -EPROTO; + + skb_pull(skb, sizeof(*ev)); + arg->vdev_map = ev->vdev_map; + + for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) { + if (!(map & BIT(0))) + continue; + + /* If this happens there were some changes in firmware and + * ath10k should update the max size of tim_info array. 
+ */ + if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info))) + break; + + if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) > + sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) { + ath10k_warn(ar, "refusing to parse invalid swba structure\n"); + return -EPROTO; + } + + arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len; + arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast; + arg->tim_info[i].tim_bitmap = + ev->bcn_info[i].tim_info.tim_bitmap; + arg->tim_info[i].tim_changed = + ev->bcn_info[i].tim_info.tim_changed; + arg->tim_info[i].tim_num_ps_pending = + ev->bcn_info[i].tim_info.tim_num_ps_pending; + i++; + } + + return 0; +} + static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb, struct wmi_swba_ev_arg *arg) @@ -7584,7 +7628,7 @@ static const struct wmi_ops wmi_10_2_4_ops = { .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev, .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev, .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev, - .pull_swba = ath10k_wmi_op_pull_swba_ev, + .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev, .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr, .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev, .pull_rdy = ath10k_wmi_op_pull_rdy_ev, diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 187f495c1fbe..806d4bb6e7d6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -5454,6 +5454,16 @@ struct wmi_host_swba_event { struct wmi_bcn_info bcn_info[0]; } __packed; +struct wmi_10_2_4_bcn_info { + struct wmi_tim_info tim_info; + /* The 10.2.4 FW doesn't have p2p NOA info */ +} __packed; + +struct wmi_10_2_4_host_swba_event { + __le32 vdev_map; + struct wmi_10_2_4_bcn_info bcn_info[0]; +} __packed; + /* 16 words = 512 client + 1 word = for guard */ #define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17 -- cgit v1.2.3 From b9a9693fd9aea43f50b107dfc8cbaea317f95a79 Mon Sep 17 00:00:00 2001 From: Kalle Valo Date: Fri, 27 Nov 2015 09:37:14 +0100 Subject: ath9k: request NOA update when chanctx active Request NOA update when chanctx active, also in case of STA. 
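The opt-in itself amounts to setting a single vif driver flag at interface-add time; a minimal sketch assuming mac80211's IEEE80211_VIF_GET_NOA_UPDATE flag (the real ath9k hunk below additionally gates this on ath9k_is_chanctx_enabled() and drops the p2p-only checks):

static int example_add_interface(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif)
{
        /* Ask mac80211 for NoA updates even on a plain (non-p2p) station vif */
        if (vif->type == NL80211_IFTYPE_STATION)
                vif->driver_flags |= IEEE80211_VIF_GET_NOA_UPDATE;

        return 0;
}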
Signed-off-by: Janusz Dziedzic Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/channel.c | 2 +- drivers/net/wireless/ath/ath9k/main.c | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 50e614b915f1..0bcb82abd094 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -1454,7 +1454,7 @@ static void ath9k_update_p2p_ps(struct ath_softc *sc, struct ieee80211_vif *vif) if (!sc->p2p_ps_timer) return; - if (vif->type != NL80211_IFTYPE_STATION || !vif->p2p) + if (vif->type != NL80211_IFTYPE_STATION) return; sc->p2p_ps_vif = avp; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index c1b33fdcca08..4ae63fd4290e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -978,7 +978,7 @@ static void ath9k_update_bssid_mask(struct ath_softc *sc, if (ctx->nvifs_assigned != 1) continue; - if (!avp->vif->p2p || !iter_data->has_hw_macaddr) + if (!iter_data->has_hw_macaddr) continue; ether_addr_copy(common->curbssid, avp->bssid); @@ -1255,6 +1255,9 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); sc->cur_chan->nvifs++; + if (vif->type == NL80211_IFTYPE_STATION && ath9k_is_chanctx_enabled()) + vif->driver_flags |= IEEE80211_VIF_GET_NOA_UPDATE; + if (ath9k_uses_beacons(vif->type)) ath9k_beacon_assign_slot(sc, vif); -- cgit v1.2.3 From aea57edf80c6e96d6dc24757599396af99c02b19 Mon Sep 17 00:00:00 2001 From: Alexander Tsoy Date: Fri, 8 Jan 2016 01:26:03 +0300 Subject: ath9k_htc: add device ID for Toshiba WLM-20U2/GN-1080 This device is available under different marketing names: WLM-20U2 - Wireless USB Dongle for Toshiba TVs GN-1080 - Wireless LAN Module for Toshiba MFPs. Signed-off-by: Alexander Tsoy Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hif_usb.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 165dd202c365..8cbf4904db7b 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -55,6 +55,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = { .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */ { USB_DEVICE(0x04da, 0x3904), .driver_info = AR9280_USB }, + { USB_DEVICE(0x0930, 0x0a08), + .driver_info = AR9280_USB }, /* Toshiba WLM-20U2 and GN-1080 */ { USB_DEVICE(0x0cf3, 0x20ff), .driver_info = STORAGE_DEVICE }, -- cgit v1.2.3 From c5a157e4d7d7f1d39dbb382bc3ad7ea9f248ed7d Mon Sep 17 00:00:00 2001 From: Lior David Date: Sun, 17 Jan 2016 12:39:06 +0200 Subject: wil6210: fix privacy flag calculation in change_beacon Currently the privacy flag calculation in change_beacon checks for RSN IE inside proberesp_ies, but normally it is not found there. It works today because of code inside the function wil_fix_bcon, but wil_fix_bcon is not directly related to change_beacon, and it may be changed or removed in the future and break the calculation. To fix this issue, change the privacy flag calculation to check RSN IE inside the beacon itself. The new check is more reliable and will not be sensitive to changes in wil_fix_bcon. 
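In short, the privacy flag is now derived from the RSN IE in the beacon tail rather than from proberesp_ies; a minimal sketch of that check (the helper name is illustrative, the actual change_beacon hunk follows):

static bool beacon_requires_privacy(struct cfg80211_beacon_data *bcon)
{
        /* An RSN IE in the beacon IEs means the BSS uses privacy */
        return bcon->tail &&
               cfg80211_find_ie(WLAN_EID_RSN, bcon->tail, bcon->tail_len);
}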
Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 20d07ef679e8..182f071f722d 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -828,9 +828,9 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, wil_print_bcon_data(bcon); } - if (bcon->proberesp_ies && - cfg80211_find_ie(WLAN_EID_RSN, bcon->proberesp_ies, - bcon->proberesp_ies_len)) + if (bcon->tail && + cfg80211_find_ie(WLAN_EID_RSN, bcon->tail, + bcon->tail_len)) privacy = 1; /* in case privacy has changed, need to restart the AP */ -- cgit v1.2.3 From c100c883e785802ab333604237928a65ae62c6bc Mon Sep 17 00:00:00 2001 From: Lior David Date: Sun, 17 Jan 2016 12:39:07 +0200 Subject: wil6210: use extra IEs from probe response In the start_ap/change_beacon API, when we set up probe response offloading, we only use the IE list from the probe response template and not the IE list from the proberesp_ies argument. As a result, we miss important IEs and it causes problems with some scenarios such as P2P. With this change, we merge the list of IEs from the probe response template and proberesp_ies and send the merged list to the FW for offloading. It is still FW responsibility to filter out irrelevant IEs when sending probe response, based on the actual contents of the probe request. Also in case association response termplate is not provided, we will use the merged list of IEs from probe response in the association response as well. Signed-off-by: Lior David Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 141 ++++++++++++++++++++-------- 1 file changed, 102 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 182f071f722d..774352f9e256 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -696,6 +696,79 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy, return rc; } +/** + * find a specific IE in a list of IEs + * return a pointer to the beginning of IE in the list + * or NULL if not found + */ +static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie, + u16 ie_len) +{ + struct ieee80211_vendor_ie *vie; + u32 oui; + + /* IE tag at offset 0, length at offset 1 */ + if (ie_len < 2 || 2 + ie[1] > ie_len) + return NULL; + + if (ie[0] != WLAN_EID_VENDOR_SPECIFIC) + return cfg80211_find_ie(ie[0], ies, ies_len); + + /* make sure there is room for 3 bytes OUI + 1 byte OUI type */ + if (ie[1] < 4) + return NULL; + vie = (struct ieee80211_vendor_ie *)ie; + oui = vie->oui[0] << 16 | vie->oui[1] << 8 | vie->oui[2]; + return cfg80211_find_vendor_ie(oui, vie->oui_type, ies, + ies_len); +} + +/** + * merge the IEs in two lists into a single list. + * do not include IEs from the second list which exist in the first list. 
+ * add only vendor specific IEs from second list to keep + * the merged list sorted (since vendor-specific IE has the + * highest tag number) + * caller must free the allocated memory for merged IEs + */ +static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len, + const u8 *ies2, u16 ies2_len, + u8 **merged_ies, u16 *merged_len) +{ + u8 *buf, *dpos; + const u8 *spos; + + if (ies1_len == 0 && ies2_len == 0) { + *merged_ies = NULL; + *merged_len = 0; + return 0; + } + + buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + memcpy(buf, ies1, ies1_len); + dpos = buf + ies1_len; + spos = ies2; + while (spos + 1 < ies2 + ies2_len) { + /* IE tag at offset 0, length at offset 1 */ + u16 ielen = 2 + spos[1]; + + if (spos + ielen > ies2 + ies2_len) + break; + if (spos[0] == WLAN_EID_VENDOR_SPECIFIC && + !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) { + memcpy(dpos, spos, ielen); + dpos += ielen; + } + spos += ielen; + } + + *merged_ies = buf; + *merged_len = dpos - buf; + return 0; +} + static void wil_print_bcon_data(struct cfg80211_beacon_data *b) { print_hex_dump_bytes("head ", DUMP_PREFIX_OFFSET, @@ -712,49 +785,49 @@ static void wil_print_bcon_data(struct cfg80211_beacon_data *b) b->assocresp_ies, b->assocresp_ies_len); } -static int wil_fix_bcon(struct wil6210_priv *wil, - struct cfg80211_beacon_data *bcon) -{ - struct ieee80211_mgmt *f = (struct ieee80211_mgmt *)bcon->probe_resp; - size_t hlen = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); - - if (bcon->probe_resp_len <= hlen) - return 0; - -/* always use IE's from full probe frame, they has more info - * notable RSN - */ - bcon->proberesp_ies = f->u.probe_resp.variable; - bcon->proberesp_ies_len = bcon->probe_resp_len - hlen; - if (!bcon->assocresp_ies) { - bcon->assocresp_ies = bcon->proberesp_ies; - bcon->assocresp_ies_len = bcon->proberesp_ies_len; - } - - return 1; -} - /* internal functions for device reset and starting AP */ static int _wil_cfg80211_set_ies(struct wiphy *wiphy, struct cfg80211_beacon_data *bcon) { int rc; struct wil6210_priv *wil = wiphy_to_wil(wiphy); + u16 len = 0, proberesp_len = 0; + u8 *ies = NULL, *proberesp = NULL; + + if (bcon->probe_resp) { + struct ieee80211_mgmt *f = + (struct ieee80211_mgmt *)bcon->probe_resp; + size_t hlen = offsetof(struct ieee80211_mgmt, + u.probe_resp.variable); + proberesp = f->u.probe_resp.variable; + proberesp_len = bcon->probe_resp_len - hlen; + } + rc = _wil_cfg80211_merge_extra_ies(proberesp, + proberesp_len, + bcon->proberesp_ies, + bcon->proberesp_ies_len, + &ies, &len); - rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, - bcon->proberesp_ies); if (rc) - return rc; + goto out; - rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, - bcon->assocresp_ies); + rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, len, ies); + if (rc) + goto out; + + if (bcon->assocresp_ies) + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, + bcon->assocresp_ies_len, bcon->assocresp_ies); + else + rc = wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, len, ies); #if 0 /* to use beacon IE's, remove this #if 0 */ if (rc) - return rc; + goto out; rc = wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->tail_len, bcon->tail); #endif - +out: + kfree(ies); return rc; } @@ -823,11 +896,6 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, wil_dbg_misc(wil, "%s()\n", __func__); wil_print_bcon_data(bcon); - if (wil_fix_bcon(wil, bcon)) { - wil_dbg_misc(wil, "Fixed bcon\n"); - wil_print_bcon_data(bcon); - } - if (bcon->tail && 
cfg80211_find_ie(WLAN_EID_RSN, bcon->tail, bcon->tail_len)) @@ -900,11 +968,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, wil_print_bcon_data(bcon); wil_print_crypto(wil, crypto); - if (wil_fix_bcon(wil, bcon)) { - wil_dbg_misc(wil, "Fixed bcon\n"); - wil_print_bcon_data(bcon); - } - rc = _wil_cfg80211_start_ap(wiphy, ndev, info->ssid, info->ssid_len, info->privacy, info->beacon_interval, channel->hw_value, -- cgit v1.2.3 From 640751ac30b2ddfbe06ad712aa7d57a8d89a7eef Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Sun, 17 Jan 2016 12:39:08 +0200 Subject: wil6210: handle tx completions only if vring tx data is enabled wil_vring_fini_tx is called in disconnect in order to free the allocated vrings. wil_vring_fini_tx is disabling the vring_tx_data before napi_synchronize is called in order to avoid the tx handling of this vring, while wil_vring_free is called only after napi finished the current handling of the tx completed packets. Due to that, in case of disconnect, wil6210_netdev_poll_tx can be called when vring->va is not NULL but vring_tx_data[i]->enabled is already disabled. This patch checks vring_tx_data[i]->enabled in wil6210_netdev_poll_tx to prevent handling of disabled vrings. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/netdev.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 56aaa2d4fb0e..ecc3c1bdae4b 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -108,8 +108,9 @@ static int wil6210_netdev_poll_tx(struct napi_struct *napi, int budget) /* always process ALL Tx complete, regardless budget - it is fast */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { struct vring *vring = &wil->vring_tx[i]; + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; - if (!vring->va) + if (!vring->va || !txdata->enabled) continue; tx_done += wil_tx_complete(wil, i); -- cgit v1.2.3 From b729aaf066b013458e95cd6c69a5e371eb9ce355 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Sun, 17 Jan 2016 12:39:09 +0200 Subject: wil6210: find TX vring only if vring_tx_data is enabled In TX vring allocation, vring_tx->va is allocated before WMI command to configure the vring is sent to the device. As the WMI command can take time to complete, it can lead to scenarios where vring_tx->va is not NULL but vring is still not enabled. This patch adds a check that vring_tx_data is enabled before returning a valid TX vring. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/txrx.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 7887e6cfd817..c22ee8b3ed9b 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -911,10 +911,11 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, continue; if (wil->vring2cid_tid[i][0] == cid) { struct vring *v = &wil->vring_tx[i]; + struct vring_tx_data *txdata = &wil->vring_tx_data[i]; wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n", __func__, eth->h_dest, i); - if (v->va) { + if (v->va && txdata->enabled) { return v; } else { wil_dbg_txrx(wil, "vring[%d] not valid\n", i); @@ -935,6 +936,7 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, struct vring *v; int i; u8 cid; + struct vring_tx_data *txdata; /* In the STA mode, it is expected to have only 1 VRING * for the AP we connected to. @@ -942,7 +944,8 @@ static struct vring *wil_find_tx_vring_sta(struct wil6210_priv *wil, */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; - if (!v->va) + txdata = &wil->vring_tx_data[i]; + if (!v->va || !txdata->enabled) continue; cid = wil->vring2cid_tid[i][0]; @@ -978,12 +981,14 @@ static struct vring *wil_find_tx_bcast_1(struct wil6210_priv *wil, struct sk_buff *skb) { struct vring *v; + struct vring_tx_data *txdata; int i = wil->bcast_vring; if (i < 0) return NULL; v = &wil->vring_tx[i]; - if (!v->va) + txdata = &wil->vring_tx_data[i]; + if (!v->va || !txdata->enabled) return NULL; if (!wil->vring_tx_data[i].dot1x_open && (skb->protocol != cpu_to_be16(ETH_P_PAE))) @@ -1010,11 +1015,13 @@ static struct vring *wil_find_tx_bcast_2(struct wil6210_priv *wil, u8 cid; struct ethhdr *eth = (void *)skb->data; char *src = eth->h_source; + struct vring_tx_data *txdata; /* find 1-st vring eligible for data */ for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) { v = &wil->vring_tx[i]; - if (!v->va) + txdata = &wil->vring_tx_data[i]; + if (!v->va || !txdata->enabled) continue; cid = wil->vring2cid_tid[i][0]; -- cgit v1.2.3 From 0916d9f2b6bea53a830ae5061b63a515c04da62e Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Sun, 17 Jan 2016 12:39:10 +0200 Subject: wil6210: handle multiple connect/disconnect events In the current solution wil6210 configures the vring in a worker and holds only one pending CID. This implementation may lead to race conditions between connect and disconnect events of multiple stations or fast connect/disconnect events of the same station. In order to allow the removal of the connect worker and handling of WMI_VRING_CFG_DONE_EVENTID in the connect event, the WMI replies that provide the reply in a given buffer needs to be handled immediately in the WMI event interrupt thread. To prevent deadlocks, WMI replies that requires additional handling are still handled via the events list. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 81 +++++++-------- drivers/net/wireless/ath/wil6210/txrx.c | 3 + drivers/net/wireless/ath/wil6210/wil6210.h | 8 +- drivers/net/wireless/ath/wil6210/wmi.c | 157 +++++++++++++++++++++-------- 4 files changed, 156 insertions(+), 93 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index b39f0bfc591e..0652efed6b5d 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -155,7 +155,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) if (sta->status != wil_sta_unused) { if (!from_event) - wmi_disconnect_sta(wil, sta->addr, reason_code); + wmi_disconnect_sta(wil, sta->addr, reason_code, true); switch (wdev->iftype) { case NL80211_IFTYPE_AP: @@ -195,8 +195,8 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid, struct wireless_dev *wdev = wil->wdev; might_sleep(); - wil_dbg_misc(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, - reason_code, from_event ? "+" : "-"); + wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid, + reason_code, from_event ? "+" : "-"); /* Cases are: * - disconnect single STA, still connected @@ -258,13 +258,16 @@ static void wil_disconnect_worker(struct work_struct *work) static void wil_connect_timer_fn(ulong x) { struct wil6210_priv *wil = (void *)x; + bool q; - wil_dbg_misc(wil, "Connect timeout\n"); + wil_err(wil, "Connect timeout detected, disconnect station\n"); /* reschedule to thread context - disconnect won't - * run from atomic context + * run from atomic context. + * queue on wmi_wq to prevent race with connect event. */ - schedule_work(&wil->disconnect_worker); + q = queue_work(wil->wmi_wq, &wil->disconnect_worker); + wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q); } static void wil_scan_timer_fn(ulong x) @@ -369,6 +372,32 @@ static int wil_find_free_vring(struct wil6210_priv *wil) return -EINVAL; } +int wil_tx_init(struct wil6210_priv *wil, int cid) +{ + int rc = -EINVAL, ringid; + + if (cid < 0) { + wil_err(wil, "No connection pending\n"); + goto out; + } + ringid = wil_find_free_vring(wil); + if (ringid < 0) { + wil_err(wil, "No free vring found\n"); + goto out; + } + + wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", + cid, ringid); + + rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); + if (rc) + wil_err(wil, "wil_vring_init_tx for CID %d vring %d failed\n", + cid, ringid); + +out: + return rc; +} + int wil_bcast_init(struct wil6210_priv *wil) { int ri = wil->bcast_vring, rc; @@ -399,41 +428,6 @@ void wil_bcast_fini(struct wil6210_priv *wil) wil_vring_fini_tx(wil, ri); } -static void wil_connect_worker(struct work_struct *work) -{ - int rc, cid, ringid; - struct wil6210_priv *wil = container_of(work, struct wil6210_priv, - connect_worker); - struct net_device *ndev = wil_to_ndev(wil); - - mutex_lock(&wil->mutex); - - cid = wil->pending_connect_cid; - if (cid < 0) { - wil_err(wil, "No connection pending\n"); - goto out; - } - ringid = wil_find_free_vring(wil); - if (ringid < 0) { - wil_err(wil, "No free vring found\n"); - goto out; - } - - wil_dbg_wmi(wil, "Configure for connection CID %d vring %d\n", - cid, ringid); - - rc = wil_vring_init_tx(wil, ringid, 1 << tx_ring_order, cid, 0); - wil->pending_connect_cid = -1; - if (rc == 0) { - wil->sta[cid].status = wil_sta_connected; - netif_tx_wake_all_queues(ndev); - } else { - wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true); - } -out: - mutex_unlock(&wil->mutex); -} - int wil_priv_init(struct wil6210_priv *wil) { uint i; @@ -453,12 +447,10 @@ int wil_priv_init(struct wil6210_priv *wil) init_completion(&wil->wmi_ready); init_completion(&wil->wmi_call); - wil->pending_connect_cid = -1; wil->bcast_vring = -1; setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); setup_timer(&wil->scan_timer, 
wil_scan_timer_fn, (ulong)wil); - INIT_WORK(&wil->connect_worker, wil_connect_worker); INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker); @@ -844,7 +836,6 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) } /* init after reset */ - wil->pending_connect_cid = -1; wil->ap_isolate = 0; reinit_completion(&wil->wmi_ready); reinit_completion(&wil->wmi_call); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index c22ee8b3ed9b..9680b970b863 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -794,6 +794,9 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, txdata->dot1x_open = false; txdata->enabled = 0; wil_vring_free(wil, vring, 1); + wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; + wil->vring2cid_tid[id][1] = 0; + out: return rc; diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 235e205ce2bc..1b8fa1d2bae9 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -581,12 +581,10 @@ struct wil6210_priv { struct workqueue_struct *wmi_wq; /* for deferred calls */ struct work_struct wmi_event_worker; struct workqueue_struct *wq_service; - struct work_struct connect_worker; struct work_struct disconnect_worker; struct work_struct fw_error_worker; /* for FW error recovery */ struct timer_list connect_timer; struct timer_list scan_timer; /* detect scan timeout */ - int pending_connect_cid; struct list_head pending_wmi_ev; /* * protect pending_wmi_ev @@ -756,7 +754,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); int wmi_p2p_cfg(struct wil6210_priv *wil, int channel); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason); +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, + bool full_disconnect); int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout); int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason); int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason); @@ -807,6 +806,7 @@ void wil_rx_fini(struct wil6210_priv *wil); int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, int cid, int tid); void wil_vring_fini_tx(struct wil6210_priv *wil, int id); +int wil_tx_init(struct wil6210_priv *wil, int cid); int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size); int wil_bcast_init(struct wil6210_priv *wil); void wil_bcast_fini(struct wil6210_priv *wil); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index e3ea74cdd4aa..e1a6cb8840ed 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -426,6 +426,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) const size_t assoc_req_ie_offset = sizeof(u16) * 2; /* capinfo(u16) + status_code(u16) + associd(u16) + IEs */ const size_t assoc_resp_ie_offset = sizeof(u16) * 3; + int rc; if (len < sizeof(*evt)) { wil_err(wil, "Connect event too short : %d bytes\n", len); @@ -445,8 +446,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } ch = evt->channel + 1; - wil_dbg_wmi(wil, "Connect %pM channel [%d] cid %d\n", - evt->bssid, ch, evt->cid); + wil_info(wil, "Connect %pM channel [%d] cid %d\n", + evt->bssid, ch, evt->cid); wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1, evt->assoc_info, len - sizeof(*evt), true); @@ -468,20 +469,67 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) assoc_resp_ielen = 0; } + mutex_lock(&wil->mutex); + if (test_bit(wil_status_resetting, wil->status) || + !test_bit(wil_status_fwready, wil->status)) { + wil_err(wil, "status_resetting, cancel connect event, CID %d\n", + evt->cid); + mutex_unlock(&wil->mutex); + /* no need for cleanup, wil_reset will do that */ + return; + } + if ((wdev->iftype == NL80211_IFTYPE_STATION) || (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { if (!test_bit(wil_status_fwconnecting, wil->status)) { wil_err(wil, "Not in connecting state\n"); + mutex_unlock(&wil->mutex); return; } del_timer_sync(&wil->connect_timer); - cfg80211_connect_result(ndev, evt->bssid, - assoc_req_ie, assoc_req_ielen, - assoc_resp_ie, assoc_resp_ielen, - WLAN_STATUS_SUCCESS, GFP_KERNEL); + } + + /* FIXME FW can transmit only ucast frames to peer */ + /* FIXME real ring_id instead of hard coded 0 */ + ether_addr_copy(wil->sta[evt->cid].addr, evt->bssid); + wil->sta[evt->cid].status = wil_sta_conn_pending; + rc = wil_tx_init(wil, evt->cid); + if (rc) { + wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n", + __func__, evt->cid, rc); + wmi_disconnect_sta(wil, wil->sta[evt->cid].addr, + WLAN_REASON_UNSPECIFIED, false); + } else { + wil_info(wil, "%s: successful connection to CID %d\n", + __func__, evt->cid); + } + + if ((wdev->iftype == NL80211_IFTYPE_STATION) || + (wdev->iftype == NL80211_IFTYPE_P2P_CLIENT)) { + if (rc) { + netif_tx_stop_all_queues(ndev); + netif_carrier_off(ndev); + wil_err(wil, + "%s: cfg80211_connect_result with failure\n", + __func__); + cfg80211_connect_result(ndev, evt->bssid, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + goto out; + } else { + cfg80211_connect_result(ndev, evt->bssid, + assoc_req_ie, assoc_req_ielen, + assoc_resp_ie, assoc_resp_ielen, + WLAN_STATUS_SUCCESS, + GFP_KERNEL); + } } else if ((wdev->iftype == NL80211_IFTYPE_AP) || (wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + if (rc) + goto out; + memset(&sinfo, 0, sizeof(sinfo)); sinfo.generation = wil->sinfo_gen++; @@ -492,17 +540,21 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len) } cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL); + } else { + wil_err(wil, "%s: unhandled iftype %d for CID %d\n", + __func__, wdev->iftype, evt->cid); + goto out; } - clear_bit(wil_status_fwconnecting, wil->status); - set_bit(wil_status_fwconnected, wil->status); - /* FIXME FW can transmit only ucast frames to peer */ - /* FIXME real ring_id instead of hard coded 0 */ - ether_addr_copy(wil->sta[evt->cid].addr, 
evt->bssid); - wil->sta[evt->cid].status = wil_sta_conn_pending; + wil->sta[evt->cid].status = wil_sta_connected; + set_bit(wil_status_fwconnected, wil->status); + netif_tx_wake_all_queues(ndev); - wil->pending_connect_cid = evt->cid; - queue_work(wil->wq_service, &wil->connect_worker); +out: + if (rc) + wil->sta[evt->cid].status = wil_sta_unused; + clear_bit(wil_status_fwconnecting, wil->status); + mutex_unlock(&wil->mutex); } static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, @@ -511,8 +563,8 @@ static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, struct wmi_disconnect_event *evt = d; u16 reason_code = le16_to_cpu(evt->protocol_reason_status); - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", - evt->bssid, reason_code, evt->disconnect_reason); + wil_info(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + evt->bssid, reason_code, evt->disconnect_reason); wil->sinfo_gen++; @@ -727,6 +779,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) void __iomem *src; ulong flags; unsigned n; + unsigned int num_immed_reply = 0; if (!test_bit(wil_status_mbox_ready, wil->status)) { wil_err(wil, "Reset in progress. Cannot handle WMI event\n"); @@ -736,6 +789,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) for (n = 0;; n++) { u16 len; bool q; + bool immed_reply = false; r->head = wil_r(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, rx.head)); @@ -784,6 +838,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil) struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; u16 id = le16_to_cpu(wmi->id); u32 tstamp = le32_to_cpu(wmi->timestamp); + if (wil->reply_id && wil->reply_id == id) { + if (wil->reply_buf) { + memcpy(wil->reply_buf, wmi, + min(len, wil->reply_size)); + immed_reply = true; + } + } wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", id, wmi->mid, tstamp); @@ -799,15 +860,24 @@ void wmi_recv_cmd(struct wil6210_priv *wil) wil_w(wil, RGF_MBOX + offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail); - /* add to the pending list */ - spin_lock_irqsave(&wil->wmi_ev_lock, flags); - list_add_tail(&evt->list, &wil->pending_wmi_ev); - spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); - q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); - wil_dbg_wmi(wil, "queue_work -> %d\n", q); + if (immed_reply) { + wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", + __func__, wil->reply_id); + kfree(evt); + num_immed_reply++; + complete(&wil->wmi_call); + } else { + /* add to the pending list */ + spin_lock_irqsave(&wil->wmi_ev_lock, flags); + list_add_tail(&evt->list, &wil->pending_wmi_ev); + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); + q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); + wil_dbg_wmi(wil, "queue_work -> %d\n", q); + } } /* normally, 1 event per IRQ should be processed */ - wil_dbg_wmi(wil, "%s -> %d events queued\n", __func__, n); + wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__, + n - num_immed_reply, num_immed_reply); } int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, @@ -1184,7 +1254,8 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) return 0; } -int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) +int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason, + bool full_disconnect) { int rc; u16 reason_code; @@ -1208,19 +1279,20 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason) return rc; } - /* call event handler manually after processing wmi_call, - * to avoid deadlock - disconnect event handler acquires wil->mutex - 
* while it is already held here - */ - reason_code = le16_to_cpu(reply.evt.protocol_reason_status); - - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", - reply.evt.bssid, reason_code, - reply.evt.disconnect_reason); + if (full_disconnect) { + /* call event handler manually after processing wmi_call, + * to avoid deadlock - disconnect event handler acquires + * wil->mutex while it is already held here + */ + reason_code = le16_to_cpu(reply.evt.protocol_reason_status); - wil->sinfo_gen++; - wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", + reply.evt.bssid, reason_code, + reply.evt.disconnect_reason); + wil->sinfo_gen++; + wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); + } return 0; } @@ -1348,14 +1420,11 @@ static void wmi_event_handle(struct wil6210_priv *wil, id, wil->reply_id); /* check if someone waits for this event */ if (wil->reply_id && wil->reply_id == id) { - if (wil->reply_buf) { - memcpy(wil->reply_buf, wmi, - min(len, wil->reply_size)); - } else { - wmi_evt_call_handler(wil, id, evt_data, - len - sizeof(*wmi)); - } - wil_dbg_wmi(wil, "Complete WMI 0x%04x\n", id); + WARN_ON(wil->reply_buf); + wmi_evt_call_handler(wil, id, evt_data, + len - sizeof(*wmi)); + wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n", + __func__, id); complete(&wil->wmi_call); return; } -- cgit v1.2.3 From 424aecaa797901708e7f260b642d27b077cdb491 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 14 Jan 2016 03:15:13 +0100 Subject: ath9k_hw: add low power tx gain table for AR953x Used in some newer TP-Link AR9533 devices. Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_hw.c | 3 ++ drivers/net/wireless/ath/ath9k/ar953x_initvals.h | 65 ++++++++++++++++++++++++ 2 files changed, 68 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c index 8b238c15916d..2fe12b0de5b4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c @@ -698,6 +698,9 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah) else if (AR_SREV_9340(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9340Modes_low_ob_db_tx_gain_table_1p0); + else if (AR_SREV_9531_11(ah)) + INIT_INI_ARRAY(&ah->iniModesTxGain, + qca953x_1p1_modes_no_xpa_low_power_tx_gain_table); else if (AR_SREV_9485_11_OR_LATER(ah)) INIT_INI_ARRAY(&ah->iniModesTxGain, ar9485Modes_low_ob_db_tx_gain_1_1); diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h index 6fc0d07e5ec6..c0b90daa3e3d 100644 --- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h @@ -757,6 +757,71 @@ static const u32 qca953x_1p1_modes_xpa_tx_gain_table[][2] = { {0x00016448, 0x6c927a70}, }; +static const u32 qca953x_1p1_modes_no_xpa_low_power_tx_gain_table[][2] = { + /* Addr allmodes */ + {0x0000a2dc, 0xfff55592}, + {0x0000a2e0, 0xfff99924}, + {0x0000a2e4, 0xfffe1e00}, + {0x0000a2e8, 0xffffe000}, + {0x0000a410, 0x000050d6}, + {0x0000a500, 0x00000069}, + {0x0000a504, 0x0400006b}, + {0x0000a508, 0x0800006d}, + {0x0000a50c, 0x0c000269}, + {0x0000a510, 0x1000026b}, + {0x0000a514, 0x1400026d}, + {0x0000a518, 0x18000669}, + {0x0000a51c, 0x1c00066b}, + {0x0000a520, 0x1d000a68}, + {0x0000a524, 0x21000a6a}, + {0x0000a528, 0x25000a6c}, + {0x0000a52c, 0x29000a6e}, + {0x0000a530, 0x2d0012a9}, + {0x0000a534, 0x310012ab}, + {0x0000a538, 
0x350012ad}, + {0x0000a53c, 0x39001b0a}, + {0x0000a540, 0x3d001b0c}, + {0x0000a544, 0x41001b0e}, + {0x0000a548, 0x43001bae}, + {0x0000a54c, 0x45001914}, + {0x0000a550, 0x47001916}, + {0x0000a554, 0x49001b96}, + {0x0000a558, 0x49001b96}, + {0x0000a55c, 0x49001b96}, + {0x0000a560, 0x49001b96}, + {0x0000a564, 0x49001b96}, + {0x0000a568, 0x49001b96}, + {0x0000a56c, 0x49001b96}, + {0x0000a570, 0x49001b96}, + {0x0000a574, 0x49001b96}, + {0x0000a578, 0x49001b96}, + {0x0000a57c, 0x49001b96}, + {0x0000a600, 0x00000000}, + {0x0000a604, 0x00000000}, + {0x0000a608, 0x00000000}, + {0x0000a60c, 0x00000000}, + {0x0000a610, 0x00000000}, + {0x0000a614, 0x00000000}, + {0x0000a618, 0x00804201}, + {0x0000a61c, 0x01408201}, + {0x0000a620, 0x01408502}, + {0x0000a624, 0x01408502}, + {0x0000a628, 0x01408502}, + {0x0000a62c, 0x01408502}, + {0x0000a630, 0x01408502}, + {0x0000a634, 0x01408502}, + {0x0000a638, 0x01408502}, + {0x0000a63c, 0x01408502}, + {0x0000b2dc, 0xfff55592}, + {0x0000b2e0, 0xfff99924}, + {0x0000b2e4, 0xfffe1e00}, + {0x0000b2e8, 0xffffe000}, + {0x00016044, 0x044922db}, + {0x00016048, 0x6c927a70}, + {0x00016444, 0x044922db}, + {0x00016448, 0x6c927a70}, +}; + static const u32 qca953x_2p0_baseband_core[][2] = { /* Addr allmodes */ {0x00009800, 0xafe68e30}, -- cgit v1.2.3 From 344cd850b55e9dfd4f3ec1ba564f8edbc8136aca Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 15 Jan 2016 14:41:30 +0100 Subject: ath9k: Drop useless const on chanctx_event_delta() return type drivers/net/wireless/ath/ath9k/channel.c:230: warning: type qualifiers ignored on function return type Signed-off-by: Geert Uytterhoeven Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/channel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c index 0bcb82abd094..319cb5f25f58 100644 --- a/drivers/net/wireless/ath/ath9k/channel.c +++ b/drivers/net/wireless/ath/ath9k/channel.c @@ -226,7 +226,7 @@ static const char *chanctx_state_string(enum ath_chanctx_state state) } } -static const u32 chanctx_event_delta(struct ath_softc *sc) +static u32 chanctx_event_delta(struct ath_softc *sc) { u64 ms; struct timespec ts, *old; -- cgit v1.2.3 From c1cab1df9b651d999922e0f04d01dc046121a237 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 18 Jan 2016 09:33:46 +0800 Subject: ath9k: avoid ANI restart if no trigger Fixes commit 54da20d83f0e ("ath9k_hw: improve ANI processing and rx desensitizing parameters") Call ath9k_ani_restart() only when the phy error rate reach the ANI immunity threshold. Sync the logic with internal code base. 
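To summarize the intended control flow, here is a simplified, self-contained sketch (placeholder names and types, not the driver code; the real change is in the diff below): ANI is restarted only after one of the PHY-error-rate thresholds has actually triggered an immunity change.

/* Illustrative sketch only; names are placeholders. */
struct ani_state_sketch {
	unsigned int ofdm_trig_high;	/* OFDM PHY-error rate threshold */
	unsigned int cck_trig_high;	/* CCK PHY-error rate threshold */
};

static void ani_ofdm_err_trigger(struct ani_state_sketch *ani) { /* raise OFDM immunity */ }
static void ani_cck_err_trigger(struct ani_state_sketch *ani) { /* raise CCK immunity */ }
static void ani_restart(struct ani_state_sketch *ani) { /* zero listen time, reset counters */ }

static void ani_monitor_sketch(struct ani_state_sketch *ani,
			       unsigned int ofdm_err_rate,
			       unsigned int cck_err_rate)
{
	if (ofdm_err_rate > ani->ofdm_trig_high)
		ani_ofdm_err_trigger(ani);
	else if (cck_err_rate > ani->cck_trig_high)
		ani_cck_err_trigger(ani);
	else
		return;		/* neither threshold reached: do not restart ANI */

	ani_restart(ani);
}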
Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ani.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index 25e45e4d1a60..aa82e1326fff 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -450,7 +450,9 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) } else if (cckPhyErrRate > ah->config.cck_trig_high) { ath9k_hw_ani_cck_err_trigger(ah); aniState->ofdmsTurn = true; - } + } else + return; + ath9k_ani_restart(ah); } } -- cgit v1.2.3 From 66b533d5adb3eb0cd111b07e516ab13a47ed34f5 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 18 Jan 2016 09:33:47 +0800 Subject: ath9k: clean up ANI per-channel pointer checking commit c24bd3620c50 ("ath9k: Do not maintain ANI state per-channel") removed per-channel handling, the code to check 'curchan' also should be removed as never used. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ani.c | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c index aa82e1326fff..815efe9fd208 100644 --- a/drivers/net/wireless/ath/ath9k/ani.c +++ b/drivers/net/wireless/ath/ath9k/ani.c @@ -126,12 +126,8 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah, static void ath9k_ani_restart(struct ath_hw *ah) { - struct ar5416AniState *aniState; - - if (!ah->curchan) - return; + struct ar5416AniState *aniState = &ah->ani; - aniState = &ah->ani; aniState->listenTime = 0; ENABLE_REGWRITE_BUFFER(ah); @@ -221,12 +217,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel, static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) { - struct ar5416AniState *aniState; - - if (!ah->curchan) - return; - - aniState = &ah->ani; + struct ar5416AniState *aniState = &ah->ani; if (aniState->ofdmNoiseImmunityLevel < ATH9K_ANI_OFDM_MAX_LEVEL) ath9k_hw_set_ofdm_nil(ah, aniState->ofdmNoiseImmunityLevel + 1, false); @@ -281,12 +272,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel, static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) { - struct ar5416AniState *aniState; - - if (!ah->curchan) - return; - - aniState = &ah->ani; + struct ar5416AniState *aniState = &ah->ani; if (aniState->cckNoiseImmunityLevel < ATH9K_ANI_CCK_MAX_LEVEL) ath9k_hw_set_cck_nil(ah, aniState->cckNoiseImmunityLevel + 1, @@ -299,9 +285,7 @@ static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) */ static void ath9k_hw_ani_lower_immunity(struct ath_hw *ah) { - struct ar5416AniState *aniState; - - aniState = &ah->ani; + struct ar5416AniState *aniState = &ah->ani; /* lower OFDM noise immunity */ if (aniState->ofdmNoiseImmunityLevel > 0 && @@ -329,7 +313,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning) struct ath_common *common = ath9k_hw_common(ah); int ofdm_nil, cck_nil; - if (!ah->curchan) + if (!chan) return; BUG_ON(aniState == NULL); @@ -416,14 +400,10 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah) void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan) { - struct ar5416AniState *aniState; + struct ar5416AniState *aniState = &ah->ani; struct ath_common *common = ath9k_hw_common(ah); u32 ofdmPhyErrRate, cckPhyErrRate; - if (!ah->curchan) - return; - - aniState = &ah->ani; if 
(!ath9k_hw_ani_read_counters(ah)) return; -- cgit v1.2.3 From 3f6cc4e57d304f552abc2bdba716519c5e18b442 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 18 Jan 2016 09:33:48 +0800 Subject: ath9k: do not reset while BB panic(0x4000409) on ar9561 BB panic(0x4000409) observed while AP enabling/disabling bursting. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 201425e7f9cb..abd964691d8c 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -2071,7 +2071,8 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah) * to be disabled. * * 0x04000409: Packet stuck on receive. - * Full chip reset is required for all chips except AR9340. + * Full chip reset is required for all chips except + * AR9340, AR9531 and AR9561. */ /* @@ -2100,7 +2101,7 @@ bool ar9003_hw_bb_watchdog_check(struct ath_hw *ah) case 0x04000b09: return true; case 0x04000409: - if (AR_SREV_9340(ah) || AR_SREV_9531(ah)) + if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9561(ah)) return false; else return true; -- cgit v1.2.3 From 1542bc37842d5d51e4aa71286a08be9615eba884 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 18 Jan 2016 09:33:49 +0800 Subject: ath9k: fix inconsistent use of tab and space in indentation Minor changes for indenting. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 8b4561e8ce1a..54ed2f72d35e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -5485,11 +5485,11 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, AR9300_PAPRD_SCALE_1); else { if (chan->channel >= 5700) - return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), - AR9300_PAPRD_SCALE_1); + return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20), + AR9300_PAPRD_SCALE_1); else if (chan->channel >= 5400) return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), - AR9300_PAPRD_SCALE_2); + AR9300_PAPRD_SCALE_2); else return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40), AR9300_PAPRD_SCALE_1); -- cgit v1.2.3 From 466b0f0208508b08eb59907fac0958d6da7fa3e2 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Mon, 18 Jan 2016 09:33:50 +0800 Subject: ath9k: fix data bus error on ar9300 and ar9580 One crash issue be found on ar9300: RTC_RC reg read leads crash, leading the data bus error, due to RTC_RC reg write not happen properly. Warm Reset trigger in continuous beacon stuck for one of the customer for other chip, noticed the MAC was stuck in RTC reset. After analysis noticed DMA did not complete when RTC was put in reset. So, before resetting the MAC need to make sure there are no pending DMA transactions because this reset does not reset all parts of the chip. The 12th and 11th bit of MAC _DMA_CFG register used to do that. 12 cfg_halt_ack 0x0 0 DMA has not yet halted 1 DMA has halted 11 cfg_halt_req 0x0 0 DMA logic operates normally 1 Request DMA logic to stop so software can reset the MAC The Bit [12] of this register indicates when the halt has taken effect or not. 
the DMA halt IS NOT recoverable; once software sets bit [11] to request a DMA halt, software must wait for bit [12] to be set and reset the MAC. So, the same thing we implemented for ar9580 chip. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/hw.c | 10 ++++++++++ drivers/net/wireless/ath/ath9k/reg.h | 4 +++- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 257f46ed4a04..e7a31016f370 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1368,6 +1368,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) if (ath9k_hw_mci_is_enabled(ah)) ar9003_mci_check_gpm_offset(ah); + /* DMA HALT added to resolve ar9300 and ar9580 bus error during + * RTC_RC reg read + */ + if (AR_SREV_9300(ah) || AR_SREV_9580(ah)) { + REG_SET_BIT(ah, AR_CFG, AR_CFG_HALT_REQ); + ath9k_hw_wait(ah, AR_CFG, AR_CFG_HALT_ACK, AR_CFG_HALT_ACK, + 20 * AH_WAIT_TIMEOUT); + REG_CLR_BIT(ah, AR_CFG, AR_CFG_HALT_REQ); + } + REG_WRITE(ah, AR_RTC_RC, rst_flags); REGWRITE_BUFFER_FLUSH(ah); diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h index caba54ddad25..c8d35febaf0f 100644 --- a/drivers/net/wireless/ath/ath9k/reg.h +++ b/drivers/net/wireless/ath/ath9k/reg.h @@ -34,8 +34,10 @@ #define AR_CFG_SWRG 0x00000010 #define AR_CFG_AP_ADHOC_INDICATION 0x00000020 #define AR_CFG_PHOK 0x00000100 -#define AR_CFG_CLK_GATE_DIS 0x00000400 #define AR_CFG_EEBS 0x00000200 +#define AR_CFG_CLK_GATE_DIS 0x00000400 +#define AR_CFG_HALT_REQ 0x00000800 +#define AR_CFG_HALT_ACK 0x00001000 #define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000 #define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17 -- cgit v1.2.3 From df94e702277e14889af5eef5fbfc3a33261c8b33 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 21 Jan 2016 14:13:23 +0100 Subject: ath10k: rename some HTT events New names make a bit more sense. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.c | 8 ++++---- drivers/net/wireless/ath/ath10k/htt.h | 8 ++++---- drivers/net/wireless/ath/ath10k/htt_rx.c | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c index 3e6ba63dfdff..7561f22f10f9 100644 --- a/drivers/net/wireless/ath/ath10k/htt.c +++ b/drivers/net/wireless/ath/ath10k/htt.c @@ -131,12 +131,12 @@ static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = { [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF, [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] = HTT_T2H_MSG_TYPE_TX_FETCH_IND, - [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF] = - HTT_T2H_MSG_TYPE_TX_FETCH_CONF, + [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] = + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD, - [HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND] = - HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, + [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] = + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, }; int ath10k_htt_connect(struct ath10k_htt *htt) diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 47ca048feaf0..edd3b9070de9 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -413,10 +413,10 @@ enum htt_10_4_t2h_msg_type { HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14, HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15, HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16, - HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONF = 0x17, + HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17, HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18, /* 0x19 to 0x2f are reserved */ - HTT_10_4_T2H_MSG_TYPE_TX_LOW_LATENCY_IND = 0x30, + HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30, /* keep this last */ HTT_10_4_T2H_NUM_MSGS }; @@ -449,8 +449,8 @@ enum htt_t2h_msg_type { HTT_T2H_MSG_TYPE_TEST, HTT_T2H_MSG_TYPE_EN_STATS, HTT_T2H_MSG_TYPE_TX_FETCH_IND, - HTT_T2H_MSG_TYPE_TX_FETCH_CONF, - HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND, + HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM, + HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND, /* keep this last */ HTT_T2H_NUM_MSGS }; diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 3079434b5d9b..84abe4f25c73 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2125,8 +2125,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; case HTT_T2H_MSG_TYPE_EN_STATS: case HTT_T2H_MSG_TYPE_TX_FETCH_IND: - case HTT_T2H_MSG_TYPE_TX_FETCH_CONF: - case HTT_T2H_MSG_TYPE_TX_LOW_LATENCY_IND: + case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: + case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: default: ath10k_warn(ar, "htt event (%d) not handled\n", resp->hdr.msg_type); -- cgit v1.2.3 From 22e6b3bc5d9668dc711665d255efad89c527b4d6 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 21 Jan 2016 14:13:24 +0100 Subject: ath10k: add new htt definitions These definitions are associated with some improvements upcomming for 10.4 and QCA99X0. 
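The definitions added below rely heavily on paired _MASK/_LSB constants. As a purely illustrative, standalone example of how such pairs are typically packed and unpacked (the helper macros and field names here are local to this sketch, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the usual mask/shift helpers. */
#define SKETCH_GET(v, f)	(((v) & f##_MASK) >> f##_LSB)
#define SKETCH_SET(v, f)	(((uint16_t)(v) << f##_LSB) & f##_MASK)

/* Same layout style as the HTT record info fields. */
#define DEMO_INFO_PEER_ID_MASK	0x0fff
#define DEMO_INFO_PEER_ID_LSB	0
#define DEMO_INFO_TID_MASK	0xf000
#define DEMO_INFO_TID_LSB	12

int main(void)
{
	uint16_t info = SKETCH_SET(42, DEMO_INFO_PEER_ID) |
			SKETCH_SET(5, DEMO_INFO_TID);

	/* prints "peer_id=42 tid=5" */
	printf("peer_id=%u tid=%u\n",
	       (unsigned int)SKETCH_GET(info, DEMO_INFO_PEER_ID),
	       (unsigned int)SKETCH_GET(info, DEMO_INFO_TID));
	return 0;
}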
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 153 +++++++++++++++++++++++++++++++++- 1 file changed, 150 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index edd3b9070de9..0c5628dafabf 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -52,6 +52,7 @@ enum htt_h2t_msg_type { /* host-to-target */ /* This command is used for sending management frames in HTT < 3.0. * HTT >= 3.0 uses TX_FRM for everything. */ HTT_H2T_MSG_TYPE_MGMT_TX = 7, + HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11, HTT_H2T_NUM_MSGS /* keep this last */ }; @@ -1306,9 +1307,43 @@ struct htt_frag_desc_bank_id { * so we use a conservatively safe value for now */ #define HTT_FRAG_DESC_BANK_MAX 4 -#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 -#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 -#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP (1 << 2) +#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03 +#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0 +#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4) +#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4 + +enum htt_q_depth_type { + HTT_Q_DEPTH_TYPE_BYTES = 0, + HTT_Q_DEPTH_TYPE_MSDUS = 1, +}; + +#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \ + TARGET_10_4_NUM_VDEVS) +#define HTT_TX_Q_STATE_NUM_TIDS 8 +#define HTT_TX_Q_STATE_ENTRY_SIZE 1 +#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0 + +/** + * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config + * + * Defines host q state format and behavior. See htt_q_state. + * + * @record_size: Defines the size of each host q entry in bytes. In practice + * however firmware (at least 10.4.3-00191) ignores this host + * configuration value and uses hardcoded value of 1. + * @record_multiplier: This is valid only when q depth type is MSDUs. It + * defines the exponent for the power of 2 multiplication. + */ +struct htt_q_state_conf { + __le32 paddr; + __le16 num_peers; + __le16 num_tids; + u8 record_size; + u8 record_multiplier; + u8 pad[2]; +} __packed; struct htt_frag_desc_bank_cfg { u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */ @@ -1316,6 +1351,114 @@ struct htt_frag_desc_bank_cfg { u8 desc_size; __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX]; struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX]; + struct htt_q_state_conf q_state; +} __packed; + +#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128 +#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f +#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0 +#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0 +#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6 + +/** + * htt_q_state - shared between host and firmware via DMA + * + * This structure is used for the host to expose it's software queue state to + * firmware so that its rate control can schedule fetch requests for optimized + * performance. This is most notably used for MU-MIMO aggregation when multiple + * MU clients are connected. + * + * @count: Each element defines the host queue depth. When q depth type was + * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as: + * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and + * HTT_TX_Q_STATE_ENTRY_EXP_MASK). 
When q depth type was configured as + * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 ** + * record_multiplier (see htt_q_state_conf). + * @map: Used by firmware to quickly check which host queues are not empty. It + * is a bitmap simply saying. + * @seq: Used by firmware to quickly check if the host queues were updated + * since it last checked. + * + * FIXME: Is the q_state map[] size calculation really correct? + */ +struct htt_q_state { + u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS]; + u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32]; + __le32 seq; +} __packed; + +#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff +#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0 +#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000 +#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12 + +struct htt_tx_fetch_record { + __le16 info; /* HTT_TX_FETCH_IND_RECORD_INFO_ */ + __le16 num_msdus; + __le32 num_bytes; +} __packed; + +struct htt_tx_fetch_ind { + u8 pad0; + __le16 fetch_seq_num; + __le32 token; + __le16 num_resp_ids; + __le16 num_records; + struct htt_tx_fetch_record records[0]; + __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */ +} __packed; + +static inline void * +ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind) +{ + return (void *)&ind->records[le16_to_cpu(ind->num_records)]; +} + +struct htt_tx_fetch_resp { + u8 pad0; + __le16 resp_id; + __le16 fetch_seq_num; + __le16 num_records; + __le32 token; + struct htt_tx_fetch_record records[0]; +} __packed; + +struct htt_tx_fetch_confirm { + u8 pad0; + __le16 num_resp_ids; + __le32 resp_ids[0]; +} __packed; + +enum htt_tx_mode_switch_mode { + HTT_TX_MODE_SWITCH_PUSH = 0, + HTT_TX_MODE_SWITCH_PUSH_PULL = 1, +}; + +#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0) +#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe +#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1 + +#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003 +#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0 +#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc +#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2 + +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0 +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000 +#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12 + +struct htt_tx_mode_switch_record { + __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */ + __le16 num_max_msdus; +} __packed; + +struct htt_tx_mode_switch_ind { + u8 pad0; + __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */ + __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */ + u8 pad1[2]; + struct htt_tx_mode_switch_record records[0]; } __packed; union htt_rx_pn_t { @@ -1340,6 +1483,7 @@ struct htt_cmd { struct htt_oob_sync_req oob_sync_req; struct htt_aggr_conf aggr_conf; struct htt_frag_desc_bank_cfg frag_desc_bank_cfg; + struct htt_tx_fetch_resp tx_fetch_resp; }; } __packed; @@ -1364,6 +1508,9 @@ struct htt_resp { struct htt_rx_pn_ind rx_pn_ind; struct htt_rx_offload_ind rx_offload_ind; struct htt_rx_in_ord_ind rx_in_ord_ind; + struct htt_tx_fetch_ind tx_fetch_ind; + struct htt_tx_fetch_confirm tx_fetch_confirm; + struct htt_tx_mode_switch_ind tx_mode_switch_ind; }; } __packed; -- cgit v1.2.3 From 9b783763aa9623dedeed77bec158f7377cb87750 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 21 Jan 2016 14:13:25 +0100 Subject: ath10k: add new FW_FEATURE_PEER_FLOW_CONTROL This feature flag will be used for firmware which supports pull-push model where host shares 
it's software queue state with firmware and firmware generates fetch requests telling host which queues to dequeue tx from. Primary function of this is improved MU-MIMO performance with multiple clients. Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 1 + drivers/net/wireless/ath/ath10k/core.h | 9 +++++++++ 2 files changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index b41eb3f4ee56..48a26206c5c1 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -217,6 +217,7 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode", [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca", [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp", + [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 921b86a4f257..5067a0ff4e89 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -512,6 +512,15 @@ enum ath10k_fw_features { /* Firmware supports management frame protection */ ATH10K_FW_FEATURE_MFP_SUPPORT = 12, + /* Firmware supports pull-push model where host shares it's software + * queue state with firmware and firmware generates fetch requests + * telling host which queues to dequeue tx from. + * + * Primary function of this is improved MU-MIMO performance with + * multiple clients. + */ + ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; -- cgit v1.2.3 From 575fc89500d37839eb24ecd258982b24edefed6f Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 21 Jan 2016 14:13:26 +0100 Subject: ath10k: clean up cont frag desc init code This makes the code easier to extend and re-use. While at it fix _warn to _err. Other than that there are no functional changes. 
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_tx.c | 58 ++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index b3adadb5f824..28b8d7af8506 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -97,6 +97,41 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) idr_remove(&htt->pending_tx, msdu_id); } +static void ath10k_htt_tx_free_cont_frag_desc(struct ath10k_htt *htt) +{ + size_t size; + + if (!htt->frag_desc.vaddr) + return; + + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + + dma_free_coherent(htt->ar->dev, + size, + htt->frag_desc.vaddr, + htt->frag_desc.paddr); +} + +static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!ar->hw_params.continuous_frag_desc) + return 0; + + size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); + htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, + &htt->frag_desc.paddr, + GFP_KERNEL); + if (!htt->frag_desc.vaddr) { + ath10k_err(ar, "failed to alloc fragment desc memory\n"); + return -ENOMEM; + } + + return 0; +} + int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; @@ -118,20 +153,12 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) goto free_idr_pending_tx; } - if (!ar->hw_params.continuous_frag_desc) - goto skip_frag_desc_alloc; - - size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc); - htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size, - &htt->frag_desc.paddr, - GFP_KERNEL); - if (!htt->frag_desc.vaddr) { - ath10k_warn(ar, "failed to alloc fragment desc memory\n"); - ret = -ENOMEM; + ret = ath10k_htt_tx_alloc_cont_frag_desc(htt); + if (ret) { + ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret); goto free_txbuf; } -skip_frag_desc_alloc: return 0; free_txbuf: @@ -139,8 +166,10 @@ free_txbuf: sizeof(struct ath10k_htt_txbuf); dma_free_coherent(htt->ar->dev, size, htt->txbuf.vaddr, htt->txbuf.paddr); + free_idr_pending_tx: idr_destroy(&htt->pending_tx); + return ret; } @@ -174,12 +203,7 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) htt->txbuf.paddr); } - if (htt->frag_desc.vaddr) { - size = htt->max_num_pending_tx * - sizeof(struct htt_msdu_ext_desc); - dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr, - htt->frag_desc.paddr); - } + ath10k_htt_tx_free_cont_frag_desc(htt); } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) -- cgit v1.2.3 From 9b15873628050fe59ebbfae200be7d50ce5e1491 Mon Sep 17 00:00:00 2001 From: Michal Kazior Date: Thu, 21 Jan 2016 14:13:27 +0100 Subject: ath10k: implement basic support for new tx path firmware This allows to use the new firmware which implements the new tx data path. Without this patch firmware supporting new tx path stops responding shortly after booting. This patch doesn't implement the entire pull-push logic available in the new firmware. This will be done in subsequent patches. 
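Condensed from the diff below, the host-side setup amounts to: allocate the shared queue-state structure, DMA-map it, and later advertise its physical address in the fragment-descriptor bank configuration, all gated on the new firmware feature flag. A simplified sketch (error paths and constants trimmed):

static int htt_tx_alloc_txq_sketch(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size = sizeof(*htt->tx_q_state.vaddr);

	/* Old firmware: keep the legacy push-only tx path untouched. */
	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
		return 0;

	/* State the firmware will read over DMA to schedule fetch requests. */
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	if (dma_mapping_error(ar->dev, htt->tx_q_state.paddr)) {
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	/* paddr, num_peers and num_tids are then reported to firmware in the
	 * HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG message.
	 */
	return 0;
}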
Signed-off-by: Michal Kazior Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt.h | 8 +++ drivers/net/wireless/ath/ath10k/htt_rx.c | 4 +- drivers/net/wireless/ath/ath10k/htt_tx.c | 88 +++++++++++++++++++++++++++++--- 3 files changed, 91 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h index 0c5628dafabf..13391ea4422d 100644 --- a/drivers/net/wireless/ath/ath10k/htt.h +++ b/drivers/net/wireless/ath/ath10k/htt.h @@ -1665,6 +1665,14 @@ struct ath10k_htt { dma_addr_t paddr; struct ath10k_htt_txbuf *vaddr; } txbuf; + + struct { + struct htt_q_state *vaddr; + dma_addr_t paddr; + u16 num_peers; + u16 num_tids; + enum htt_q_depth_type type; + } tx_q_state; }; #define RX_HTT_HDR_STATUS_LEN 64 diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index 84abe4f25c73..cc957a625605 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2123,10 +2123,12 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; case HTT_T2H_MSG_TYPE_AGGR_CONF: break; - case HTT_T2H_MSG_TYPE_EN_STATS: case HTT_T2H_MSG_TYPE_TX_FETCH_IND: case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM: case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND: + /* TODO: Implement pull-push logic */ + break; + case HTT_T2H_MSG_TYPE_EN_STATS: default: ath10k_warn(ar, "htt event (%d) not handled\n", resp->hdr.msg_type); diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c index 28b8d7af8506..95acb727c068 100644 --- a/drivers/net/wireless/ath/ath10k/htt_tx.c +++ b/drivers/net/wireless/ath/ath10k/htt_tx.c @@ -132,6 +132,50 @@ static int ath10k_htt_tx_alloc_cont_frag_desc(struct ath10k_htt *htt) return 0; } +static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) + return; + + size = sizeof(*htt->tx_q_state.vaddr); + + dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE); + kfree(htt->tx_q_state.vaddr); +} + +static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt) +{ + struct ath10k *ar = htt->ar; + size_t size; + int ret; + + if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) + return 0; + + htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS; + htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS; + htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES; + + size = sizeof(*htt->tx_q_state.vaddr); + htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL); + if (!htt->tx_q_state.vaddr) + return -ENOMEM; + + htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr, + size, DMA_TO_DEVICE); + ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr); + if (ret) { + ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret); + kfree(htt->tx_q_state.vaddr); + return -EIO; + } + + return 0; +} + int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; @@ -159,8 +203,17 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt) goto free_txbuf; } + ret = ath10k_htt_tx_alloc_txq(htt); + if (ret) { + ath10k_err(ar, "failed to alloc txq: %d\n", ret); + goto free_frag_desc; + } + return 0; +free_frag_desc: + ath10k_htt_tx_free_cont_frag_desc(htt); + free_txbuf: size = htt->max_num_pending_tx * sizeof(struct ath10k_htt_txbuf); @@ -203,6 +256,7 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt) htt->txbuf.paddr); } + ath10k_htt_tx_free_txq(htt); 
ath10k_htt_tx_free_cont_frag_desc(htt); } @@ -292,7 +346,9 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; + struct htt_frag_desc_bank_cfg *cfg; int ret, size; + u8 info; if (!ar->hw_params.continuous_frag_desc) return 0; @@ -310,14 +366,30 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) skb_put(skb, size); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; - cmd->frag_desc_bank_cfg.info = 0; - cmd->frag_desc_bank_cfg.num_banks = 1; - cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); - cmd->frag_desc_bank_cfg.bank_base_addrs[0] = - __cpu_to_le32(htt->frag_desc.paddr); - cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; - cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = - __cpu_to_le16(htt->max_num_pending_tx - 1); + + info = 0; + info |= SM(htt->tx_q_state.type, + HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE); + + if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features)) + info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID; + + cfg = &cmd->frag_desc_bank_cfg; + cfg->info = info; + cfg->num_banks = 1; + cfg->desc_size = sizeof(struct htt_msdu_ext_desc); + cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr); + cfg->bank_id[0].bank_min_id = 0; + cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - + 1); + + cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr); + cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers); + cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids); + cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE; + cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER; + + ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n"); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { -- cgit v1.2.3 From f52f517189dea69614bbee9ec5ab2fe366905b16 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:22 +0530 Subject: ath10k: make some of ath10k_pci_* func reusable Some of static functions present in pci.c file are reusable in ahb (qca4019) case. Remove static word for those reusable functions and have those function prototype declaration in pci.h file. So that, pci.h header file can be included in ahb module and reused. There is no functionality changes done in this patch. 
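The mechanics are plain C linkage: dropping "static" from the definition in pci.c gives the symbol external linkage, and the matching prototype in pci.h makes it callable from the planned ahb module. Taking one of the functions touched by the patch as an example (body shown only schematically):

/* pci.c: the definition simply loses its "static" qualifier */
void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	/* body unchanged: enables the legacy interrupt sources */
}

/* pci.h: a matching prototype is added so other objects can link to it */
void ath10k_pci_enable_legacy_irq(struct ath10k *ar);

/* ahb.c (planned): #include "pci.h" and call the helper directly */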
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 63 +++++++++++++++++------------------ drivers/net/wireless/ath/ath10k/pci.h | 32 ++++++++++++++++++ 2 files changed, 63 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index ee925c618535..956e548fd4a7 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -94,7 +94,6 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static int ath10k_pci_cold_reset(struct ath10k *ar); static int ath10k_pci_safe_chip_reset(struct ath10k *ar); -static int ath10k_pci_wait_for_target_init(struct ath10k *ar); static int ath10k_pci_init_irq(struct ath10k *ar); static int ath10k_pci_deinit_irq(struct ath10k *ar); static int ath10k_pci_request_irq(struct ath10k *ar); @@ -687,7 +686,7 @@ void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val) ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val); } -static bool ath10k_pci_irq_pending(struct ath10k *ar) +bool ath10k_pci_irq_pending(struct ath10k *ar) { u32 cause; @@ -700,7 +699,7 @@ static bool ath10k_pci_irq_pending(struct ath10k *ar) return false; } -static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) +void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) { /* IMPORTANT: INTR_CLR register has to be set after * INTR_ENABLE is set to 0, otherwise interrupt can not be @@ -716,7 +715,7 @@ static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar) PCIE_INTR_ENABLE_ADDRESS); } -static void ath10k_pci_enable_legacy_irq(struct ath10k *ar) +void ath10k_pci_enable_legacy_irq(struct ath10k *ar) { ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS, @@ -809,7 +808,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe) } } -static void ath10k_pci_rx_post(struct ath10k *ar) +void ath10k_pci_rx_post(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; @@ -818,7 +817,7 @@ static void ath10k_pci_rx_post(struct ath10k *ar) ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]); } -static void ath10k_pci_rx_replenish_retry(unsigned long ptr) +void ath10k_pci_rx_replenish_retry(unsigned long ptr) { struct ath10k *ar = (void *)ptr; @@ -1007,8 +1006,8 @@ static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest, #define ath10k_pci_diag_read_hi(ar, dest, src, len) \ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len) -static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, - const void *data, int nbytes) +int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret = 0; @@ -1263,8 +1262,8 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state) ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver); } -static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, - struct ath10k_hif_sg_item *items, int n_items) +int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id]; @@ -1332,13 +1331,13 @@ err: return err; } -static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, - size_t buf_len) +int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void 
*buf, + size_t buf_len) { return ath10k_pci_diag_read_mem(ar, address, buf, buf_len); } -static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) +u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); @@ -1406,8 +1405,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar) queue_work(ar->workqueue, &ar->restart_work); } -static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, - int force) +void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n"); @@ -1432,7 +1431,7 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, ath10k_ce_per_engine_service(ar, pipe); } -static void ath10k_pci_kill_tasklet(struct ath10k *ar) +void ath10k_pci_kill_tasklet(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; @@ -1446,8 +1445,8 @@ static void ath10k_pci_kill_tasklet(struct ath10k *ar) del_timer_sync(&ar_pci->rx_post_retry); } -static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, - u8 *ul_pipe, u8 *dl_pipe) +int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) { const struct service_to_pipe *entry; bool ul_set = false, dl_set = false; @@ -1491,8 +1490,8 @@ static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, return 0; } -static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, - u8 *ul_pipe, u8 *dl_pipe) +void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, + u8 *ul_pipe, u8 *dl_pipe) { ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n"); @@ -1668,7 +1667,7 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar) } } -static void ath10k_pci_ce_deinit(struct ath10k *ar) +void ath10k_pci_ce_deinit(struct ath10k *ar) { int i; @@ -1676,7 +1675,7 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar) ath10k_ce_deinit_pipe(ar, i); } -static void ath10k_pci_flush(struct ath10k *ar) +void ath10k_pci_flush(struct ath10k *ar) { ath10k_pci_kill_tasklet(ar); ath10k_pci_buffer_cleanup(ar); @@ -1711,9 +1710,9 @@ static void ath10k_pci_hif_stop(struct ath10k *ar) spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } -static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, - void *req, u32 req_len, - void *resp, u32 *resp_len) +int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, + void *req, u32 req_len, + void *resp, u32 *resp_len) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG]; @@ -1907,7 +1906,7 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) return 1; } -static int ath10k_pci_init_config(struct ath10k *ar) +int ath10k_pci_init_config(struct ath10k *ar) { u32 interconnect_targ_addr; u32 pcie_state_targ_addr = 0; @@ -2071,7 +2070,7 @@ static void ath10k_pci_override_ce_config(struct ath10k *ar) target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1); } -static int ath10k_pci_alloc_pipes(struct ath10k *ar) +int ath10k_pci_alloc_pipes(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci_pipe *pipe; @@ -2102,7 +2101,7 @@ static int ath10k_pci_alloc_pipes(struct ath10k *ar) return 0; } -static void ath10k_pci_free_pipes(struct ath10k *ar) +void ath10k_pci_free_pipes(struct ath10k *ar) { int i; @@ -2110,7 +2109,7 @@ static void ath10k_pci_free_pipes(struct ath10k *ar) ath10k_ce_free_pipe(ar, i); } -static int 
ath10k_pci_init_pipes(struct ath10k *ar) +int ath10k_pci_init_pipes(struct ath10k *ar) { int i, ret; @@ -2453,7 +2452,7 @@ err_sleep: return ret; } -static void ath10k_pci_hif_power_down(struct ath10k *ar) +void ath10k_pci_hif_power_down(struct ath10k *ar) { ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n"); @@ -2722,7 +2721,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar) free_irq(ar_pci->pdev->irq + i, ar); } -static void ath10k_pci_init_irq_tasklets(struct ath10k *ar) +void ath10k_pci_init_irq_tasklets(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int i; @@ -2808,7 +2807,7 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar) return 0; } -static int ath10k_pci_wait_for_target_init(struct ath10k *ar) +int ath10k_pci_wait_for_target_init(struct ath10k *ar) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); unsigned long timeout; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index f91bf333cb75..ae76131aa081 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -253,6 +253,38 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset); u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr); u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr); +int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id, + struct ath10k_hif_sg_item *items, int n_items); +int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf, + size_t buf_len); +int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address, + const void *data, int nbytes); +int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len, + void *resp, u32 *resp_len); +int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe); +void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe, + u8 *dl_pipe); +void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe, + int force); +u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe); +void ath10k_pci_hif_power_down(struct ath10k *ar); +int ath10k_pci_alloc_pipes(struct ath10k *ar); +void ath10k_pci_free_pipes(struct ath10k *ar); +void ath10k_pci_free_pipes(struct ath10k *ar); +void ath10k_pci_rx_replenish_retry(unsigned long ptr); +void ath10k_pci_ce_deinit(struct ath10k *ar); +void ath10k_pci_init_irq_tasklets(struct ath10k *ar); +void ath10k_pci_kill_tasklet(struct ath10k *ar); +int ath10k_pci_init_pipes(struct ath10k *ar); +int ath10k_pci_init_config(struct ath10k *ar); +void ath10k_pci_rx_post(struct ath10k *ar); +void ath10k_pci_flush(struct ath10k *ar); +void ath10k_pci_enable_legacy_irq(struct ath10k *ar); +bool ath10k_pci_irq_pending(struct ath10k *ar); +void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar); +int ath10k_pci_wait_for_target_init(struct ath10k *ar); + /* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too * frequently. To avoid this put SoC to sleep after a very conservative grace * period. Adjust with great care. -- cgit v1.2.3 From 4ddb3299aa49ddeb40680d7427d3259d32aefa6d Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:23 +0530 Subject: ath10k: make ath10k_pci_read32/write32() ops more generic ath10k_pci_read32/write32() does work more specific to PCI by ensuring pci wake/sleep for every read and write. There is a plan to use most of stuff available in pci.c (irq stuff, copy engine, etc) for AHB case. Such kind of pci wake/sleep for every read/write is not required in AHB case (qca4019). 
All those reusable areas in pci.c and ce.c calls ath10k_pci_read32/write32() for low level read and write. In fact, ath10k_pci_read32/write32() should do what it does today for PCI case. But for AHB, it has to do differently. To make ath10k_pci_read32/write32() more generic, new function pointers are added in ar_pci for the function which does operation more close to the bus. Later, corresponding bus specific read and write function will be mapped to that. ath10k_pci_read32/write32() are changed to call directly those function pointers without worrying which bus underlying to it. Also, the function to get number of bank is changed in the same way. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 34 +++++++++++++++++++++++++++++++--- drivers/net/wireless/ath/ath10k/pci.h | 8 ++++++++ 2 files changed, 39 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 956e548fd4a7..c5f6604fa76d 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -619,7 +619,7 @@ static void ath10k_pci_sleep_sync(struct ath10k *ar) spin_unlock_irqrestore(&ar_pci->ps_lock, flags); } -void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); int ret; @@ -641,7 +641,7 @@ void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) ath10k_pci_sleep(ar); } -u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset) { struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); u32 val; @@ -666,6 +666,20 @@ u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) return val; } +inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + ar_pci->bus_ops->write32(ar, offset, value); +} + +inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + return ar_pci->bus_ops->read32(ar, offset); +} + u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr) { return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr); @@ -1906,6 +1920,13 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar) return 1; } +static int ath10k_bus_get_num_banks(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + return ar_pci->bus_ops->get_num_banks(ar); +} + int ath10k_pci_init_config(struct ath10k *ar) { u32 interconnect_targ_addr; @@ -2017,7 +2038,7 @@ int ath10k_pci_init_config(struct ath10k *ar) /* first bank is switched to IRAM */ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & HI_EARLY_ALLOC_MAGIC_MASK); - ealloc_value |= ((ath10k_pci_get_num_banks(ar) << + ealloc_value |= ((ath10k_bus_get_num_banks(ar) << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & HI_EARLY_ALLOC_IRAM_BANKS_MASK); @@ -2988,6 +3009,12 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) return false; } +static const struct ath10k_bus_ops ath10k_pci_bus_ops = { + .read32 = ath10k_bus_pci_read32, + .write32 = ath10k_bus_pci_write32, + .get_num_banks = ath10k_pci_get_num_banks, +}; + static int ath10k_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_dev) { @@ -3038,6 +3065,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar_pci->ar = ar; ar->dev_id = pci_dev->device; ar_pci->pci_ps = pci_ps; + ar_pci->bus_ops = 
&ath10k_pci_bus_ops; ar->id.vendor = pdev->vendor; ar->id.device = pdev->device; diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index ae76131aa081..41f3faced84c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -157,6 +157,12 @@ struct ath10k_pci_supp_chip { u32 rev_id; }; +struct ath10k_bus_ops { + u32 (*read32)(struct ath10k *ar, u32 offset); + void (*write32)(struct ath10k *ar, u32 offset, u32 value); + int (*get_num_banks)(struct ath10k *ar); +}; + struct ath10k_pci { struct pci_dev *pdev; struct device *dev; @@ -225,6 +231,8 @@ struct ath10k_pci { * on MMIO read/write. */ bool pci_ps; + + const struct ath10k_bus_ops *bus_ops; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) -- cgit v1.2.3 From 90188f807f2a8bada8e165b932b56f7e03b0a9b9 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:24 +0530 Subject: ath10k: pull reusable code from pci probe and remove for ahb Some of the code present in ath10k_pci_{probe|remove} are reusable in ahb case too. To avoid code duplication, move reusable code to new functions. Later, those new functions can be called from ahb module's probe and exit functions. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 64 +++++++++++++++++++++++------------ drivers/net/wireless/ath/ath10k/pci.h | 2 ++ 2 files changed, 44 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index c5f6604fa76d..6ef878c1eebc 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3009,6 +3009,37 @@ static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id) return false; } +int ath10k_pci_setup_resource(struct ath10k *ar) +{ + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + int ret; + + spin_lock_init(&ar_pci->ce_lock); + spin_lock_init(&ar_pci->ps_lock); + + setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, + (unsigned long)ar); + + if (QCA_REV_6174(ar)) + ath10k_pci_override_ce_config(ar); + + ret = ath10k_pci_alloc_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", + ret); + return ret; + } + + return 0; +} + +void ath10k_pci_release_resource(struct ath10k *ar) +{ + ath10k_pci_kill_tasklet(ar); + ath10k_pci_ce_deinit(ar); + ath10k_pci_free_pipes(ar); +} + static const struct ath10k_bus_ops ath10k_pci_bus_ops = { .read32 = ath10k_bus_pci_read32, .write32 = ath10k_bus_pci_write32, @@ -3072,34 +3103,25 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ar->id.subsystem_vendor = pdev->subsystem_vendor; ar->id.subsystem_device = pdev->subsystem_device; - spin_lock_init(&ar_pci->ce_lock); - spin_lock_init(&ar_pci->ps_lock); - - setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, - (unsigned long)ar); setup_timer(&ar_pci->ps_timer, ath10k_pci_ps_timer, (unsigned long)ar); - ret = ath10k_pci_claim(ar); + ret = ath10k_pci_setup_resource(ar); if (ret) { - ath10k_err(ar, "failed to claim device: %d\n", ret); + ath10k_err(ar, "failed to setup resource: %d\n", ret); goto err_core_destroy; } - if (QCA_REV_6174(ar)) - ath10k_pci_override_ce_config(ar); - - ret = ath10k_pci_alloc_pipes(ar); + ret = ath10k_pci_claim(ar); if (ret) { - ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", - ret); - goto err_sleep; + ath10k_err(ar, "failed to claim device: %d\n", ret); + goto err_free_pipes; } ret = 
ath10k_pci_force_wake(ar); if (ret) { ath10k_warn(ar, "failed to wake up device : %d\n", ret); - goto err_free_pipes; + goto err_sleep; } ath10k_pci_ce_deinit(ar); @@ -3108,7 +3130,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ret = ath10k_pci_init_irq(ar); if (ret) { ath10k_err(ar, "failed to init irqs: %d\n", ret); - goto err_free_pipes; + goto err_sleep; } ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n", @@ -3154,13 +3176,13 @@ err_free_irq: err_deinit_irq: ath10k_pci_deinit_irq(ar); -err_free_pipes: - ath10k_pci_free_pipes(ar); - err_sleep: ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); +err_free_pipes: + ath10k_pci_free_pipes(ar); + err_core_destroy: ath10k_core_destroy(ar); @@ -3184,10 +3206,8 @@ static void ath10k_pci_remove(struct pci_dev *pdev) ath10k_core_unregister(ar); ath10k_pci_free_irq(ar); - ath10k_pci_kill_tasklet(ar); ath10k_pci_deinit_irq(ar); - ath10k_pci_ce_deinit(ar); - ath10k_pci_free_pipes(ar); + ath10k_pci_release_resource(ar); ath10k_pci_sleep_sync(ar); ath10k_pci_release(ar); ath10k_core_destroy(ar); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index 41f3faced84c..fcfdbca8e55e 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -292,6 +292,8 @@ void ath10k_pci_enable_legacy_irq(struct ath10k *ar); bool ath10k_pci_irq_pending(struct ath10k *ar); void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar); int ath10k_pci_wait_for_target_init(struct ath10k *ar); +int ath10k_pci_setup_resource(struct ath10k *ar); +void ath10k_pci_release_resource(struct ath10k *ar); /* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too * frequently. To avoid this put SoC to sleep after a very conservative grace -- cgit v1.2.3 From 0b523ced9a3cd05abf240e913b2d80e7d0fa3478 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:25 +0530 Subject: ath10k: add basic skeleton to support ahb qca4019 uses ahb instead of pci where it slightly differs in device enumeration, clock control, reset control, etc. Good thing is that ahb also uses copy engine for the data transaction. So, the most of the stuff implemented in pci.c/ce.c are reusable in ahb case too. Device enumeration in ahb case comes through platform driver/device model. All resource details like irq, memory map, clocks, etc for qca4019 can be fetched from of_node of platform device. Simply flow would look like, device tree => platform device (kernel) => platform driver (ath10k) Device tree entry will have all qca4019 resource details and the same info will be passed to kernel. Kernel will prepare new platform device for that entry and expose DT info to of_node in platform device. Later, ath10k would register platform driver with unique compatible name and then kernels binds to corresponding compatible entry & calls ath10k ahb probe functions. From there onwards, ath10k will take control of it and move forward. New bool flag CONFIG_ATH10K_AHB is added in Kconfig to conditionally enable ahb support in ath10k. On enabling this flag, ath10k_pci.ko will have ahb support. This patch adds only basic skeleton and few macros to support ahb in the context of qca4019. 
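For orientation, a minimal sketch of the match/probe flow this skeleton is building towards once the commented-out compatible entry is enabled. It mirrors code added later in the series and is shown only as illustration; the example_* names are placeholders.

static const struct of_device_id example_of_match[] = {
	{ .compatible = "qcom,ipq4019-wifi",
	  .data = (void *)ATH10K_HW_QCA4019 },
	{ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	enum ath10k_hw_rev hw_rev;

	/* device tree entry -> platform device -> this probe callback */
	of_id = of_match_device(example_of_match, &pdev->dev);
	if (!of_id)
		return -EINVAL;

	/* the hardware revision is carried in the match entry's .data */
	hw_rev = (enum ath10k_hw_rev)of_id->data;

	/* resource mapping, clock/reset setup etc. arrive in later patches */
	return 0;
}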
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 6 +++ drivers/net/wireless/ath/ath10k/Makefile | 2 + drivers/net/wireless/ath/ath10k/ahb.c | 67 ++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 41 +++++++++++++++++++ drivers/net/wireless/ath/ath10k/core.h | 3 ++ drivers/net/wireless/ath/ath10k/debug.h | 1 + drivers/net/wireless/ath/ath10k/hw.h | 2 + drivers/net/wireless/ath/ath10k/pci.c | 8 ++++ drivers/net/wireless/ath/ath10k/pci.h | 1 + 9 files changed, 131 insertions(+) create mode 100644 drivers/net/wireless/ath/ath10k/ahb.c create mode 100644 drivers/net/wireless/ath/ath10k/ahb.h (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 03aa35f999a1..d7f207a6b0e0 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -15,6 +15,12 @@ config ATH10K_PCI ---help--- This module adds support for PCIE bus +config ATH10K_AHB + bool "Atheros ath10k AHB support" + depends on ATH10K_PCI && OF + ---help--- + This module adds support for AHB bus + config ATH10K_DEBUG bool "Atheros ath10k debugging" depends on ATH10K diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index c04fb00e7930..930fadd940d8 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -25,5 +25,7 @@ obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o ath10k_pci-y += pci.o \ ce.o +ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o + # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c new file mode 100644 index 000000000000..129f4f4491d0 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include "core.h" +#include "debug.h" +#include "ahb.h" + +static const struct of_device_id ath10k_ahb_of_match[] = { + /* TODO: enable this entry once everything in place. 
+ * { .compatible = "qcom,ipq4019-wifi", + * .data = (void *)ATH10K_HW_QCA4019 }, + */ + { } +}; + +MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match); + +static int ath10k_ahb_probe(struct platform_device *pdev) +{ + return 0; +} + +static int ath10k_ahb_remove(struct platform_device *pdev) +{ + return 0; +} + +static struct platform_driver ath10k_ahb_driver = { + .driver = { + .name = "ath10k_ahb", + .of_match_table = ath10k_ahb_of_match, + }, + .probe = ath10k_ahb_probe, + .remove = ath10k_ahb_remove, +}; + +int ath10k_ahb_init(void) +{ + int ret; + + printk(KERN_ERR "AHB support is still work in progress\n"); + + ret = platform_driver_register(&ath10k_ahb_driver); + if (ret) + printk(KERN_ERR "failed to register ath10k ahb driver: %d\n", + ret); + return ret; +} + +void ath10k_ahb_exit(void) +{ + platform_driver_unregister(&ath10k_ahb_driver); +} diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h new file mode 100644 index 000000000000..d1afe93f9329 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved. + * Copyright (c) 2015 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _AHB_H_ +#define _AHB_H_ + +#include + +#ifdef CONFIG_ATH10K_AHB + +int ath10k_ahb_init(void); +void ath10k_ahb_exit(void); + +#else /* CONFIG_ATH10K_AHB */ + +static inline int ath10k_ahb_init(void) +{ + return 0; +} + +static inline void ath10k_ahb_exit(void) +{ +} + +#endif /* CONFIG_ATH10K_AHB */ + +#endif /* _AHB_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 5067a0ff4e89..2e411b5258c2 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -69,6 +69,7 @@ struct ath10k; enum ath10k_bus { ATH10K_BUS_PCI, + ATH10K_BUS_AHB, }; static inline const char *ath10k_bus_str(enum ath10k_bus bus) @@ -76,6 +77,8 @@ static inline const char *ath10k_bus_str(enum ath10k_bus bus) switch (bus) { case ATH10K_BUS_PCI: return "pci"; + case ATH10K_BUS_AHB: + return "ahb"; } return "unknown"; diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h index f273478e2afb..6206edd7c49f 100644 --- a/drivers/net/wireless/ath/ath10k/debug.h +++ b/drivers/net/wireless/ath/ath10k/debug.h @@ -37,6 +37,7 @@ enum ath10k_debug_mask { ATH10K_DBG_TESTMODE = 0x00001000, ATH10K_DBG_WMI_PRINT = 0x00002000, ATH10K_DBG_PCI_PS = 0x00004000, + ATH10K_DBG_AHB = 0x00008000, ATH10K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 0678831e8671..f8850155140a 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -200,6 +200,7 @@ enum ath10k_hw_rev { ATH10K_HW_QCA6174, ATH10K_HW_QCA99X0, ATH10K_HW_QCA9377, + ATH10K_HW_QCA4019, }; struct ath10k_hw_regs { @@ -253,6 +254,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, #define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174) #define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0) #define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377) +#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019) /* Known pecularities: * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 6ef878c1eebc..0e338b657210 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -851,6 +851,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr) 0x7ff) << 21; break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA4019: val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS); break; } @@ -1529,6 +1530,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to mask irq/MSI. */ @@ -1551,6 +1553,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar) CORE_CTRL_ADDRESS, val); break; case ATH10K_HW_QCA99X0: + case ATH10K_HW_QCA4019: /* TODO: Find appropriate register configuration for QCA99X0 * to unmask irq/MSI. 
*/ @@ -3231,6 +3234,10 @@ static int __init ath10k_pci_init(void) printk(KERN_ERR "failed to register ath10k pci driver: %d\n", ret); + ret = ath10k_ahb_init(); + if (ret) + printk(KERN_ERR "ahb init failed: %d\n", ret); + return ret; } module_init(ath10k_pci_init); @@ -3238,6 +3245,7 @@ module_init(ath10k_pci_init); static void __exit ath10k_pci_exit(void) { pci_unregister_driver(&ath10k_pci_driver); + ath10k_ahb_exit(); } module_exit(ath10k_pci_exit); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index fcfdbca8e55e..c2d4a79aa29a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -22,6 +22,7 @@ #include "hw.h" #include "ce.h" +#include "ahb.h" /* * maximum number of bytes that can be handled atomically by DiagRead/DiagWrite -- cgit v1.2.3 From 37a219a556b01b3236156ecf58717c1e41b552d9 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:26 +0530 Subject: ath10k: include qca4019 register map table New register table is added for qca4019 to tell about it's register mapping details. Nothing much other than this. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 4 ++++ drivers/net/wireless/ath/ath10k/hw.c | 39 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 2 ++ 3 files changed, 45 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 48a26206c5c1..1f4a27881936 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1980,6 +1980,10 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ar->regs = &qca99x0_regs; ar->hw_values = &qca99x0_values; break; + case ATH10K_HW_QCA4019: + ar->regs = &qca4019_regs; + ar->hw_values = &qca4019_values; + break; default: ath10k_err(ar, "unsupported core hardware revision %d\n", hw_rev); diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 7b84d08a5154..f544d48518c3 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -109,6 +109,38 @@ const struct ath10k_hw_regs qca99x0_regs = { .pcie_intr_clr_address = 0x00000010, }; +const struct ath10k_hw_regs qca4019_regs = { + .rtc_soc_base_address = 0x00080000, + .soc_core_base_address = 0x00082000, + .ce_wrapper_base_address = 0x0004d000, + .ce0_base_address = 0x0004a000, + .ce1_base_address = 0x0004a400, + .ce2_base_address = 0x0004a800, + .ce3_base_address = 0x0004ac00, + .ce4_base_address = 0x0004b000, + .ce5_base_address = 0x0004b400, + .ce6_base_address = 0x0004b800, + .ce7_base_address = 0x0004bc00, + /* qca4019 supports upto 12 copy engines. Since base address + * of ce8 to ce11 are not directly referred in the code, + * no need have them in separate members in this table. 
+ * Copy Engine Address + * CE8 0x0004c000 + * CE9 0x0004c400 + * CE10 0x0004c800 + * CE11 0x0004cc00 + */ + .soc_reset_control_si0_rst_mask = 0x00000001, + .soc_reset_control_ce_rst_mask = 0x00000100, + .soc_chip_id_address = 0x000000ec, + .fw_indicator_address = 0x0004f00c, + .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c, + .ce_wrap_intr_sum_host_msi_mask = 0x00fff000, + .pcie_intr_fw_mask = 0x00100000, + .pcie_intr_ce_mask_all = 0x000fff00, + .pcie_intr_clr_address = 0x00000010, +}; + const struct ath10k_hw_values qca988x_values = { .rtc_state_val_on = 3, .ce_count = 8, @@ -136,6 +168,13 @@ const struct ath10k_hw_values qca99x0_values = { .ce_desc_meta_data_lsb = 4, }; +const struct ath10k_hw_values qca4019_values = { + .ce_count = 12, + .num_target_ce_config_wlan = 10, + .ce_desc_meta_data_mask = 0xFFF0, + .ce_desc_meta_data_lsb = 4, +}; + void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev) { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index f8850155140a..f57a37bfc9f6 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -233,6 +233,7 @@ struct ath10k_hw_regs { extern const struct ath10k_hw_regs qca988x_regs; extern const struct ath10k_hw_regs qca6174_regs; extern const struct ath10k_hw_regs qca99x0_regs; +extern const struct ath10k_hw_regs qca4019_regs; struct ath10k_hw_values { u32 rtc_state_val_on; @@ -246,6 +247,7 @@ struct ath10k_hw_values { extern const struct ath10k_hw_values qca988x_values; extern const struct ath10k_hw_values qca6174_values; extern const struct ath10k_hw_values qca99x0_values; +extern const struct ath10k_hw_values qca4019_values; void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey, u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev); -- cgit v1.2.3 From 7f8e79cdc2534559e3e38f5aee0b9f78e57e8fb3 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:27 +0530 Subject: ath10k: add helper functions in ahb.c for reg rd/wr qca4019 deals with below register memory region to control the clock, reset, etc. - Memory to control wifi core - gcc (outside of wifi) - tcsr (outside of wifi) Add new helper functions to perform read/write in above registers spaces. Actual ioremap for above registers are done in later patch. Struct ath10k_ahb is introduced to maintain ahb specific info and memory this struct will be allocated in the continuation of struct ath10k_pci (again, memory ath10k_ahb is allocated in the later patch). 
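A short sketch of the layout described above: struct ath10k_ahb occupies the tail of struct ath10k_pci via the zero-length ahb[0] member, and the accessor simply casts the driver private data. The allocation sized for both structures only lands in a later patch; the comment below states that assumption.

static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
	/* ath10k_ahb lives in the continuation of ath10k_pci (ahb[0]);
	 * the core is created, in a later patch, with
	 * size = sizeof(struct ath10k_pci) + sizeof(struct ath10k_ahb)
	 */
	return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
}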
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 55 +++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 7 +++++ drivers/net/wireless/ath/ath10k/pci.h | 6 ++++ 3 files changed, 68 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 129f4f4491d0..eab4b47237e0 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -17,6 +17,7 @@ #include #include "core.h" #include "debug.h" +#include "pci.h" #include "ahb.h" static const struct of_device_id ath10k_ahb_of_match[] = { @@ -29,6 +30,60 @@ static const struct of_device_id ath10k_ahb_of_match[] = { MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match); +static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar) +{ + return &((struct ath10k_pci *)ar->drv_priv)->ahb[0]; +} + +static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + iowrite32(value, ar_ahb->mem + offset); +} + +static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->mem + offset); +} + +static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->gcc_mem + offset); +} + +static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + iowrite32(value, ar_ahb->tcsr_mem + offset); +} + +static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + return ioread32(ar_ahb->tcsr_mem + offset); +} + +static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr) +{ + return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr); +} + +static int ath10k_ahb_get_num_banks(struct ath10k *ar) +{ + if (ar->hw_rev == ATH10K_HW_QCA4019) + return 1; + + ath10k_warn(ar, "unknown number of banks, assuming 1\n"); + return 1; +} + static int ath10k_ahb_probe(struct platform_device *pdev) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index d1afe93f9329..77d15af59c4a 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -20,6 +20,13 @@ #include +struct ath10k_ahb { + struct platform_device *pdev; + void __iomem *mem; + void __iomem *gcc_mem; + void __iomem *tcsr_mem; +}; + #ifdef CONFIG_ATH10K_AHB int ath10k_ahb_init(void); diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h index c2d4a79aa29a..249c73a69800 100644 --- a/drivers/net/wireless/ath/ath10k/pci.h +++ b/drivers/net/wireless/ath/ath10k/pci.h @@ -234,6 +234,12 @@ struct ath10k_pci { bool pci_ps; const struct ath10k_bus_ops *bus_ops; + + /* Keep this entry in the last, memory for struct ath10k_ahb is + * allocated (ahb support enabled case) in the continuation of + * this struct. + */ + struct ath10k_ahb ahb[0]; }; static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) -- cgit v1.2.3 From 8beff219c528d19c66e9d2c40fa868d7e7c2e36f Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:28 +0530 Subject: ath10k: add clock ctrl related functions in ahb pre qca4019 chipsets has/uses internal clock generator for the operation. But, qca4019 uses external clocks supplied from outside of target (ie, outside of wifi core). 
Three different clocks (cmd clock, ref clock, rtc clock) comes into picture in qca4019. All those clocks needs to configured with help of global clock controller (gcc) to make qca4019 functioning. Add functions for clock init/deinit, clock enable/disable in ahb. This is just a preparation, functions added in this patch will be used in later patches. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 123 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 4 ++ 2 files changed, 127 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index eab4b47237e0..11185c0dc6ad 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -15,6 +15,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include +#include #include "core.h" #include "debug.h" #include "pci.h" @@ -84,6 +85,128 @@ static int ath10k_ahb_get_num_banks(struct ath10k *ar) return 1; } +static int ath10k_ahb_clock_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + int ret; + + dev = &ar_ahb->pdev->dev; + + ar_ahb->cmd_clk = clk_get(dev, "wifi_wcss_cmd"); + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) { + ath10k_err(ar, "failed to get cmd clk: %ld\n", + PTR_ERR(ar_ahb->cmd_clk)); + ret = ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV; + goto out; + } + + ar_ahb->ref_clk = clk_get(dev, "wifi_wcss_ref"); + if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) { + ath10k_err(ar, "failed to get ref clk: %ld\n", + PTR_ERR(ar_ahb->ref_clk)); + ret = ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV; + goto err_cmd_clk_put; + } + + ar_ahb->rtc_clk = clk_get(dev, "wifi_wcss_rtc"); + if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { + ath10k_err(ar, "failed to get rtc clk: %ld\n", + PTR_ERR(ar_ahb->rtc_clk)); + ret = ar_ahb->rtc_clk ? 
PTR_ERR(ar_ahb->rtc_clk) : -ENODEV; + goto err_ref_clk_put; + } + + return 0; + +err_ref_clk_put: + clk_put(ar_ahb->ref_clk); + +err_cmd_clk_put: + clk_put(ar_ahb->cmd_clk); + +out: + return ret; +} + +static void ath10k_ahb_clock_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) + clk_put(ar_ahb->cmd_clk); + + if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) + clk_put(ar_ahb->ref_clk); + + if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) + clk_put(ar_ahb->rtc_clk); + + ar_ahb->cmd_clk = NULL; + ar_ahb->ref_clk = NULL; + ar_ahb->rtc_clk = NULL; +} + +static int ath10k_ahb_clock_enable(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + int ret; + + dev = &ar_ahb->pdev->dev; + + if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) || + IS_ERR_OR_NULL(ar_ahb->ref_clk) || + IS_ERR_OR_NULL(ar_ahb->rtc_clk)) { + ath10k_err(ar, "clock(s) is/are not initialized\n"); + ret = -EIO; + goto out; + } + + ret = clk_prepare_enable(ar_ahb->cmd_clk); + if (ret) { + ath10k_err(ar, "failed to enable cmd clk: %d\n", ret); + goto out; + } + + ret = clk_prepare_enable(ar_ahb->ref_clk); + if (ret) { + ath10k_err(ar, "failed to enable ref clk: %d\n", ret); + goto err_cmd_clk_disable; + } + + ret = clk_prepare_enable(ar_ahb->rtc_clk); + if (ret) { + ath10k_err(ar, "failed to enable rtc clk: %d\n", ret); + goto err_ref_clk_disable; + } + + return 0; + +err_ref_clk_disable: + clk_disable_unprepare(ar_ahb->ref_clk); + +err_cmd_clk_disable: + clk_disable_unprepare(ar_ahb->cmd_clk); + +out: + return ret; +} + +static void ath10k_ahb_clock_disable(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + if (!IS_ERR_OR_NULL(ar_ahb->cmd_clk)) + clk_disable_unprepare(ar_ahb->cmd_clk); + + if (!IS_ERR_OR_NULL(ar_ahb->ref_clk)) + clk_disable_unprepare(ar_ahb->ref_clk); + + if (!IS_ERR_OR_NULL(ar_ahb->rtc_clk)) + clk_disable_unprepare(ar_ahb->rtc_clk); +} + static int ath10k_ahb_probe(struct platform_device *pdev) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index 77d15af59c4a..753b4330075a 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -25,6 +25,10 @@ struct ath10k_ahb { void __iomem *mem; void __iomem *gcc_mem; void __iomem *tcsr_mem; + + struct clk *cmd_clk; + struct clk *ref_clk; + struct clk *rtc_clk; }; #ifdef CONFIG_ATH10K_AHB -- cgit v1.2.3 From 14854bfd9daaa20778db64f224a957c2abaab6fe Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:29 +0530 Subject: ath10k: add reset ctrl related functions in ahb To perform reset on qca4019 wifi, multiple reset lines needs to be toggled in a sequence with help of reset controller support in the kernel. This patch adds functions to reset control init/deinit and release reset. 
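As a rough illustration of the reset-controller pattern these functions build on: the driver itself keeps the handles for the device lifetime rather than getting and putting them per call, and the helper name and flow here are illustrative only.

static int example_deassert_one(struct device *dev, const char *name)
{
	struct reset_control *rst;
	int ret;

	rst = reset_control_get(dev, name);	/* e.g. "wifi_radio_cold" */
	if (IS_ERR_OR_NULL(rst))
		return rst ? PTR_ERR(rst) : -ENODEV;

	ret = reset_control_deassert(rst);	/* take the block out of reset */
	reset_control_put(rst);

	return ret;
}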
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/Kconfig | 2 +- drivers/net/wireless/ath/ath10k/ahb.c | 138 ++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 6 ++ 3 files changed, 145 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index d7f207a6b0e0..db1ca629cbd6 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -17,7 +17,7 @@ config ATH10K_PCI config ATH10K_AHB bool "Atheros ath10k AHB support" - depends on ATH10K_PCI && OF + depends on ATH10K_PCI && OF && RESET_CONTROLLER ---help--- This module adds support for AHB bus diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 11185c0dc6ad..d1f197220e30 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -16,6 +16,7 @@ */ #include #include +#include #include "core.h" #include "debug.h" #include "pci.h" @@ -207,6 +208,143 @@ static void ath10k_ahb_clock_disable(struct ath10k *ar) clk_disable_unprepare(ar_ahb->rtc_clk); } +static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + int ret; + + dev = &ar_ahb->pdev->dev; + + ar_ahb->core_cold_rst = reset_control_get(dev, "wifi_core_cold"); + if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) { + ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n", + PTR_ERR(ar_ahb->core_cold_rst)); + ret = ar_ahb->core_cold_rst ? + PTR_ERR(ar_ahb->core_cold_rst) : -ENODEV; + goto out; + } + + ar_ahb->radio_cold_rst = reset_control_get(dev, "wifi_radio_cold"); + if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) { + ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_cold_rst)); + ret = ar_ahb->radio_cold_rst ? + PTR_ERR(ar_ahb->radio_cold_rst) : -ENODEV; + goto err_core_cold_rst_put; + } + + ar_ahb->radio_warm_rst = reset_control_get(dev, "wifi_radio_warm"); + if (IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) { + ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_warm_rst)); + ret = ar_ahb->radio_warm_rst ? + PTR_ERR(ar_ahb->radio_warm_rst) : -ENODEV; + goto err_radio_cold_rst_put; + } + + ar_ahb->radio_srif_rst = reset_control_get(dev, "wifi_radio_srif"); + if (IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) { + ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n", + PTR_ERR(ar_ahb->radio_srif_rst)); + ret = ar_ahb->radio_srif_rst ? + PTR_ERR(ar_ahb->radio_srif_rst) : -ENODEV; + goto err_radio_warm_rst_put; + } + + ar_ahb->cpu_init_rst = reset_control_get(dev, "wifi_cpu_init"); + if (IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n", + PTR_ERR(ar_ahb->cpu_init_rst)); + ret = ar_ahb->cpu_init_rst ? 
+ PTR_ERR(ar_ahb->cpu_init_rst) : -ENODEV; + goto err_radio_srif_rst_put; + } + + return 0; + +err_radio_srif_rst_put: + reset_control_put(ar_ahb->radio_srif_rst); + +err_radio_warm_rst_put: + reset_control_put(ar_ahb->radio_warm_rst); + +err_radio_cold_rst_put: + reset_control_put(ar_ahb->radio_cold_rst); + +err_core_cold_rst_put: + reset_control_put(ar_ahb->core_cold_rst); + +out: + return ret; +} + +static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + if (!IS_ERR_OR_NULL(ar_ahb->core_cold_rst)) + reset_control_put(ar_ahb->core_cold_rst); + + if (!IS_ERR_OR_NULL(ar_ahb->radio_cold_rst)) + reset_control_put(ar_ahb->radio_cold_rst); + + if (!IS_ERR_OR_NULL(ar_ahb->radio_warm_rst)) + reset_control_put(ar_ahb->radio_warm_rst); + + if (!IS_ERR_OR_NULL(ar_ahb->radio_srif_rst)) + reset_control_put(ar_ahb->radio_srif_rst); + + if (!IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) + reset_control_put(ar_ahb->cpu_init_rst); + + ar_ahb->core_cold_rst = NULL; + ar_ahb->radio_cold_rst = NULL; + ar_ahb->radio_warm_rst = NULL; + ar_ahb->radio_srif_rst = NULL; + ar_ahb->cpu_init_rst = NULL; +} + +static int ath10k_ahb_release_reset(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); + return -EINVAL; + } + + ret = reset_control_deassert(ar_ahb->radio_cold_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->radio_warm_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->radio_srif_rst); + if (ret) { + ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret); + return ret; + } + + ret = reset_control_deassert(ar_ahb->cpu_init_rst); + if (ret) { + ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret); + return ret; + } + + return 0; +} + static int ath10k_ahb_probe(struct platform_device *pdev) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index 753b4330075a..2904b7bc9459 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -29,6 +29,12 @@ struct ath10k_ahb { struct clk *cmd_clk; struct clk *ref_clk; struct clk *rtc_clk; + + struct reset_control *core_cold_rst; + struct reset_control *radio_cold_rst; + struct reset_control *radio_warm_rst; + struct reset_control *radio_srif_rst; + struct reset_control *cpu_init_rst; }; #ifdef CONFIG_ATH10K_AHB -- cgit v1.2.3 From 133df0f849bc7b5448c62272986af626f2bda6bd Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:30 +0530 Subject: ath10k: add chip and bus halt logic in ahb Add function to perform chip halt sequence and function to halt axi bus in ahb module. Mainly used in the scenario like driver unload. 
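Condensed sketch of the AXI-bus halt handshake performed on unload; constants and accessors are the ones added in this patch, and the full function in the diff below additionally reports a timeout.

static void example_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg,
				 u32 haltack_reg)
{
	unsigned long timeout;
	u32 val;

	/* raise the halt request ... */
	val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
	ath10k_ahb_tcsr_write32(ar, haltreq_reg, val | AHB_AXI_BUS_HALT_REQ);

	/* ... then poll for the ack with a bounded wait */
	timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT);
	do {
		val = ath10k_ahb_tcsr_read32(ar, haltack_reg);
		if (val & AHB_AXI_BUS_HALT_ACK)
			break;

		mdelay(1);
	} while (time_before(jiffies, timeout));
}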
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 113 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 15 +++++ 2 files changed, 128 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index d1f197220e30..230507890fbf 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -345,6 +345,119 @@ static int ath10k_ahb_release_reset(struct ath10k *ar) return 0; } +static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg, + u32 haltack_reg) +{ + unsigned long timeout; + u32 val; + + /* Issue halt axi bus request */ + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); + val |= AHB_AXI_BUS_HALT_REQ; + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); + + /* Wait for axi bus halted ack */ + timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT); + do { + val = ath10k_ahb_tcsr_read32(ar, haltack_reg); + if (val & AHB_AXI_BUS_HALT_ACK) + break; + + mdelay(1); + } while (time_before(jiffies, timeout)); + + if (!(val & AHB_AXI_BUS_HALT_ACK)) { + ath10k_err(ar, "failed to halt axi bus: %d\n", val); + return; + } + + ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n"); +} + +static void ath10k_ahb_halt_chip(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg; + u32 val; + int ret; + + if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) || + IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) || + IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) { + ath10k_err(ar, "rst ctrl(s) is/are not initialized\n"); + return; + } + + core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG); + + switch (core_id) { + case 0: + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG; + haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ; + haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK; + break; + case 1: + glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG; + haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ; + haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK; + break; + default: + ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n", + core_id); + return; + } + + ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg); + + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); + val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); + + ret = reset_control_assert(ar_ahb->core_cold_rst); + if (ret) + ath10k_err(ar, "failed to assert core cold rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_cold_rst); + if (ret) + ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_warm_rst); + if (ret) + ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->radio_srif_rst); + if (ret) + ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret); + msleep(1); + + ret = reset_control_assert(ar_ahb->cpu_init_rst); + if (ret) + ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret); + msleep(10); + + /* Clear halt req and core clock disable req before + * deasserting wifi core reset. 
+ */ + val = ath10k_ahb_tcsr_read32(ar, haltreq_reg); + val &= ~AHB_AXI_BUS_HALT_REQ; + ath10k_ahb_tcsr_write32(ar, haltreq_reg, val); + + val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg); + val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK; + ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val); + + ret = reset_control_deassert(ar_ahb->core_cold_rst); + if (ret) + ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret); + + ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id); +} + static int ath10k_ahb_probe(struct platform_device *pdev) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index 2904b7bc9459..4761eeb475d6 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -39,6 +39,21 @@ struct ath10k_ahb { #ifdef CONFIG_ATH10K_AHB +#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030 + +#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000 +#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004 +#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25) + +#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000 +#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010 +#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004 +#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014 + +#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */ +#define AHB_AXI_BUS_HALT_REQ 1 +#define AHB_AXI_BUS_HALT_ACK 1 + int ath10k_ahb_init(void); void ath10k_ahb_exit(void); -- cgit v1.2.3 From 1c44fcb9234c80bbf9e4043deab8492e1c0dcf99 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:31 +0530 Subject: ath10k: include irq related functions in ahb Add irq related functions to register,handle,release,disable interrupt. qca4019 supports msi interrupt, but it has the problem. Until the issue gets sorted out, only legacy interrupt model is enabled and used. 
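In short, the legacy interrupt path hooks a shared handler that defers all real work to the machinery pci.c already provides; a condensed sketch of that flow follows (the actual handler is in the diff below).

static irqreturn_t example_ahb_isr(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	/* shared line: bail out if the interrupt is not ours */
	if (!ath10k_pci_irq_pending(ar))
		return IRQ_NONE;

	ath10k_pci_disable_and_clear_legacy_irq(ar);
	tasklet_schedule(&ar_pci->intr_tq);	/* reuse the pci-side tasklet */

	return IRQ_HANDLED;
}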
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 44 +++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 2 ++ 2 files changed, 46 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 230507890fbf..29d0b6c6b35c 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -458,6 +458,50 @@ static void ath10k_ahb_halt_chip(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id); } +static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg) +{ + struct ath10k *ar = arg; + struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); + + if (!ath10k_pci_irq_pending(ar)) + return IRQ_NONE; + + ath10k_pci_disable_and_clear_legacy_irq(ar); + tasklet_schedule(&ar_pci->intr_tq); + + return IRQ_HANDLED; +} + +static int ath10k_ahb_request_irq_legacy(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + int ret; + + ret = request_irq(ar_ahb->irq, + ath10k_ahb_interrupt_handler, + IRQF_SHARED, "ath10k_ahb", ar); + if (ret) { + ath10k_warn(ar, "failed to request legacy irq %d: %d\n", + ar_ahb->irq, ret); + return ret; + } + + return 0; +} + +static void ath10k_ahb_release_irq_legacy(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + free_irq(ar_ahb->irq, ar); +} + +static void ath10k_ahb_irq_disable(struct ath10k *ar) +{ + ath10k_ce_disable_interrupts(ar); + ath10k_pci_disable_and_clear_legacy_irq(ar); +} + static int ath10k_ahb_probe(struct platform_device *pdev) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index 4761eeb475d6..97c40e47dc02 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -26,6 +26,8 @@ struct ath10k_ahb { void __iomem *gcc_mem; void __iomem *tcsr_mem; + int irq; + struct clk *cmd_clk; struct clk *ref_clk; struct clk *rtc_clk; -- cgit v1.2.3 From 704dc4e36769218145d30c41e81a5fbfed3977c7 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:32 +0530 Subject: ath10k: add resource init and deinit in ahb Add function to gather resources required for qca4019 to operate (memory, irq, dma setting, clock init , rest control init) and function release those resources when it's not needed. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 122 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 7 ++ 2 files changed, 129 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index 29d0b6c6b35c..d83a864515a2 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -15,6 +15,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include +#include +#include #include #include #include "core.h" @@ -502,6 +504,126 @@ static void ath10k_ahb_irq_disable(struct ath10k *ar) ath10k_pci_disable_and_clear_legacy_irq(ar); } +static int ath10k_ahb_resource_init(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct platform_device *pdev; + struct device *dev; + struct resource *res; + int ret; + + pdev = ar_ahb->pdev; + dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ath10k_err(ar, "failed to get memory resource\n"); + ret = -ENXIO; + goto out; + } + + ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ar_ahb->mem)) { + ath10k_err(ar, "mem ioremap error\n"); + ret = PTR_ERR(ar_ahb->mem); + goto out; + } + + ar_ahb->mem_len = resource_size(res); + + ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE, + ATH10K_GCC_REG_SIZE); + if (!ar_ahb->gcc_mem) { + ath10k_err(ar, "gcc mem ioremap error\n"); + ret = -ENOMEM; + goto err_mem_unmap; + } + + ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE, + ATH10K_TCSR_REG_SIZE); + if (!ar_ahb->tcsr_mem) { + ath10k_err(ar, "tcsr mem ioremap error\n"); + ret = -ENOMEM; + goto err_gcc_mem_unmap; + } + + ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret); + goto err_tcsr_mem_unmap; + } + + ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n", + ret); + goto err_tcsr_mem_unmap; + } + + ret = ath10k_ahb_clock_init(ar); + if (ret) + goto err_tcsr_mem_unmap; + + ret = ath10k_ahb_rst_ctrl_init(ar); + if (ret) + goto err_clock_deinit; + + ar_ahb->irq = platform_get_irq_byname(pdev, "legacy"); + if (ar_ahb->irq < 0) { + ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq); + goto err_clock_deinit; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%p mem_len: %lu gcc mem: 0x%p tcsr_mem: 0x%p\n", + ar_ahb->mem, ar_ahb->mem_len, + ar_ahb->gcc_mem, ar_ahb->tcsr_mem); + return 0; + +err_clock_deinit: + ath10k_ahb_clock_deinit(ar); + +err_tcsr_mem_unmap: + iounmap(ar_ahb->tcsr_mem); + +err_gcc_mem_unmap: + ar_ahb->tcsr_mem = NULL; + iounmap(ar_ahb->gcc_mem); + +err_mem_unmap: + ar_ahb->gcc_mem = NULL; + devm_iounmap(&pdev->dev, ar_ahb->mem); + +out: + ar_ahb->mem = NULL; + return ret; +} + +static void ath10k_ahb_resource_deinit(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + struct device *dev; + + dev = &ar_ahb->pdev->dev; + + if (ar_ahb->mem) + devm_iounmap(dev, ar_ahb->mem); + + if (ar_ahb->gcc_mem) + iounmap(ar_ahb->gcc_mem); + + if (ar_ahb->tcsr_mem) + iounmap(ar_ahb->tcsr_mem); + + ar_ahb->mem = NULL; + ar_ahb->gcc_mem = NULL; + ar_ahb->tcsr_mem = NULL; + + ath10k_ahb_clock_deinit(ar); + ath10k_ahb_rst_ctrl_deinit(ar); +} + static int ath10k_ahb_probe(struct platform_device *pdev) { return 0; diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index 97c40e47dc02..5bd01b49132b 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -23,6 +23,7 @@ struct ath10k_ahb { struct platform_device *pdev; void __iomem *mem; + unsigned long mem_len; void __iomem *gcc_mem; void __iomem *tcsr_mem; @@ -41,6 +42,12 @@ struct ath10k_ahb { #ifdef CONFIG_ATH10K_AHB +#define ATH10K_GCC_REG_BASE 0x1800000 +#define ATH10K_GCC_REG_SIZE 0x60000 + +#define ATH10K_TCSR_REG_BASE 0x1900000 +#define 
ATH10K_TCSR_REG_SIZE 0x80000 + #define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030 #define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000 -- cgit v1.2.3 From 0d87c9208a17cb82fed66e29180078ab603086ea Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Wed, 27 Jan 2016 15:24:33 +0530 Subject: ath10k: expose hif ops for ahb Like how pci.c exposes hif ops for the bus specific operation, expose similar hif ops table for ahb with all required functions linked to it. Many ath10k_pci_* functions are reused here in hif ops table. If something is not sharable, new functions are added for ahb and linked to hif ops table. Finally, make ath10k_ahb_probe/remove() to perform what is expected out of it. Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/ahb.c | 271 ++++++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/ahb.h | 5 + drivers/net/wireless/ath/ath10k/hw.h | 1 + 3 files changed, 277 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c index d83a864515a2..bd62bc19e758 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.c +++ b/drivers/net/wireless/ath/ath10k/ahb.c @@ -624,13 +624,284 @@ static void ath10k_ahb_resource_deinit(struct ath10k *ar) ath10k_ahb_rst_ctrl_deinit(ar); } +static int ath10k_ahb_prepare_device(struct ath10k *ar) +{ + u32 val; + int ret; + + ret = ath10k_ahb_clock_enable(ar); + if (ret) { + ath10k_err(ar, "failed to enable clocks\n"); + return ret; + } + + /* Clock for the target is supplied from outside of target (ie, + * external clock module controlled by the host). Target needs + * to know what frequency target cpu is configured which is needed + * for target internal use. Read target cpu frequency info from + * gcc register and write into target's scratch register where + * target expects this information. 
+ */ + val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV); + ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val); + + ret = ath10k_ahb_release_reset(ar); + if (ret) + goto err_clk_disable; + + ath10k_ahb_irq_disable(ar); + + ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY); + + ret = ath10k_pci_wait_for_target_init(ar); + if (ret) + goto err_halt_chip; + + return 0; + +err_halt_chip: + ath10k_ahb_halt_chip(ar); + +err_clk_disable: + ath10k_ahb_clock_disable(ar); + + return ret; +} + +static int ath10k_ahb_chip_reset(struct ath10k *ar) +{ + int ret; + + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + + ret = ath10k_ahb_prepare_device(ar); + if (ret) + return ret; + + return 0; +} + +static int ath10k_ahb_wake_target_cpu(struct ath10k *ar) +{ + u32 addr, val; + + addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS; + val = ath10k_ahb_read32(ar, addr); + val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK; + ath10k_ahb_write32(ar, addr, val); + + return 0; +} + +static int ath10k_ahb_hif_start(struct ath10k *ar) +{ + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n"); + + ath10k_ce_enable_interrupts(ar); + ath10k_pci_enable_legacy_irq(ar); + + ath10k_pci_rx_post(ar); + + return 0; +} + +static void ath10k_ahb_hif_stop(struct ath10k *ar) +{ + struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar); + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n"); + + ath10k_ahb_irq_disable(ar); + synchronize_irq(ar_ahb->irq); + + ath10k_pci_flush(ar); +} + +static int ath10k_ahb_hif_power_up(struct ath10k *ar) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n"); + + ret = ath10k_ahb_chip_reset(ar); + if (ret) { + ath10k_err(ar, "failed to reset chip: %d\n", ret); + goto out; + } + + ret = ath10k_pci_init_pipes(ar); + if (ret) { + ath10k_err(ar, "failed to initialize CE: %d\n", ret); + goto out; + } + + ret = ath10k_pci_init_config(ar); + if (ret) { + ath10k_err(ar, "failed to setup init config: %d\n", ret); + goto err_ce_deinit; + } + + ret = ath10k_ahb_wake_target_cpu(ar); + if (ret) { + ath10k_err(ar, "could not wake up target CPU: %d\n", ret); + goto err_ce_deinit; + } + + return 0; + +err_ce_deinit: + ath10k_pci_ce_deinit(ar); +out: + return ret; +} + +static const struct ath10k_hif_ops ath10k_ahb_hif_ops = { + .tx_sg = ath10k_pci_hif_tx_sg, + .diag_read = ath10k_pci_hif_diag_read, + .diag_write = ath10k_pci_diag_write_mem, + .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg, + .start = ath10k_ahb_hif_start, + .stop = ath10k_ahb_hif_stop, + .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe, + .get_default_pipe = ath10k_pci_hif_get_default_pipe, + .send_complete_check = ath10k_pci_hif_send_complete_check, + .get_free_queue_number = ath10k_pci_hif_get_free_queue_number, + .power_up = ath10k_ahb_hif_power_up, + .power_down = ath10k_pci_hif_power_down, + .read32 = ath10k_ahb_read32, + .write32 = ath10k_ahb_write32, +}; + +static const struct ath10k_bus_ops ath10k_ahb_bus_ops = { + .read32 = ath10k_ahb_read32, + .write32 = ath10k_ahb_write32, + .get_num_banks = ath10k_ahb_get_num_banks, +}; + static int ath10k_ahb_probe(struct platform_device *pdev) { + struct ath10k *ar; + struct ath10k_ahb *ar_ahb; + struct ath10k_pci *ar_pci; + const struct of_device_id *of_id; + enum ath10k_hw_rev hw_rev; + size_t size; + int ret; + u32 chip_id; + + of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "failed to find matching device tree id\n"); + return -EINVAL; + } + + hw_rev = (enum 
ath10k_hw_rev)of_id->data; + + size = sizeof(*ar_pci) + sizeof(*ar_ahb); + ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB, + hw_rev, &ath10k_ahb_hif_ops); + if (!ar) { + dev_err(&pdev->dev, "failed to allocate core\n"); + return -ENOMEM; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n"); + + ar_pci = ath10k_pci_priv(ar); + ar_ahb = ath10k_ahb_priv(ar); + + ar_ahb->pdev = pdev; + platform_set_drvdata(pdev, ar); + + ret = ath10k_ahb_resource_init(ar); + if (ret) + goto err_core_destroy; + + ar->dev_id = 0; + ar_pci->mem = ar_ahb->mem; + ar_pci->mem_len = ar_ahb->mem_len; + ar_pci->ar = ar; + ar_pci->bus_ops = &ath10k_ahb_bus_ops; + + ret = ath10k_pci_setup_resource(ar); + if (ret) { + ath10k_err(ar, "failed to setup resource: %d\n", ret); + goto err_resource_deinit; + } + + ath10k_pci_init_irq_tasklets(ar); + + ret = ath10k_ahb_request_irq_legacy(ar); + if (ret) + goto err_free_pipes; + + ret = ath10k_ahb_prepare_device(ar); + if (ret) + goto err_free_irq; + + ath10k_pci_ce_deinit(ar); + + chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS); + if (chip_id == 0xffffffff) { + ath10k_err(ar, "failed to get chip id\n"); + goto err_halt_device; + } + + ret = ath10k_core_register(ar, chip_id); + if (ret) { + ath10k_err(ar, "failed to register driver core: %d\n", ret); + goto err_halt_device; + } + return 0; + +err_halt_device: + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + +err_free_irq: + ath10k_ahb_release_irq_legacy(ar); + +err_free_pipes: + ath10k_pci_free_pipes(ar); + +err_resource_deinit: + ath10k_ahb_resource_deinit(ar); + +err_core_destroy: + ath10k_core_destroy(ar); + platform_set_drvdata(pdev, NULL); + + return ret; } static int ath10k_ahb_remove(struct platform_device *pdev) { + struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_ahb *ar_ahb; + + if (!ar) + return -EINVAL; + + ar_ahb = ath10k_ahb_priv(ar); + + if (!ar_ahb) + return -EINVAL; + + ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n"); + + ath10k_core_unregister(ar); + ath10k_ahb_irq_disable(ar); + ath10k_ahb_release_irq_legacy(ar); + ath10k_pci_release_resource(ar); + ath10k_ahb_halt_chip(ar); + ath10k_ahb_clock_disable(ar); + ath10k_ahb_resource_deinit(ar); + ath10k_core_destroy(ar); + + platform_set_drvdata(pdev, NULL); + return 0; } diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h index 5bd01b49132b..d43e375215c8 100644 --- a/drivers/net/wireless/ath/ath10k/ahb.h +++ b/drivers/net/wireless/ath/ath10k/ahb.h @@ -48,6 +48,9 @@ struct ath10k_ahb { #define ATH10K_TCSR_REG_BASE 0x1900000 #define ATH10K_TCSR_REG_SIZE 0x80000 +#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020 +#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014 + #define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030 #define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000 @@ -63,6 +66,8 @@ struct ath10k_ahb { #define AHB_AXI_BUS_HALT_REQ 1 #define AHB_AXI_BUS_HALT_ACK 1 +#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1 + int ath10k_ahb_init(void); void ath10k_ahb_exit(void); diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index f57a37bfc9f6..4217bdbe9f01 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -605,6 +605,7 @@ enum ath10k_hw_4addr_pad { #define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address #define FW_IND_EVENT_PENDING 1 #define FW_IND_INITIALIZED 2 +#define FW_IND_HOST_READY 0x80000000 /* HOST_REG interrupt from firmware */ #define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask -- cgit v1.2.3 From 
1286558e45fd90fd0faf5d41df35f511c51f25e7 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Fri, 22 Jan 2016 01:56:30 +0100 Subject: ath9k: do not limit the number of DFS channel interfaces to 1 I think this limit was added when CSA with multiple interfaces wasn't working yet. It should no longer be necessary Signed-off-by: Felix Fietkau Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/init.c | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index ab7a1ac37849..d4e0ac946c3a 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -751,14 +751,6 @@ static const struct ieee80211_iface_combination if_comb_multi[] = { #endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */ -static const struct ieee80211_iface_limit if_dfs_limits[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_ADHOC) }, -}; - static const struct ieee80211_iface_combination if_comb[] = { { .limits = if_limits, @@ -766,6 +758,11 @@ static const struct ieee80211_iface_combination if_comb[] = { .max_interfaces = 2048, .num_different_channels = 1, .beacon_int_infra_match = true, +#ifdef CONFIG_ATH9K_DFS_CERTIFIED + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40), +#endif }, { .limits = wds_limits, @@ -774,18 +771,6 @@ static const struct ieee80211_iface_combination if_comb[] = { .num_different_channels = 1, .beacon_int_infra_match = true, }, -#ifdef CONFIG_ATH9K_DFS_CERTIFIED - { - .limits = if_dfs_limits, - .n_limits = ARRAY_SIZE(if_dfs_limits), - .max_interfaces = 1, - .num_different_channels = 1, - .beacon_int_infra_match = true, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40), - } -#endif }; #ifdef CONFIG_ATH9K_CHANNEL_CONTEXT -- cgit v1.2.3 From 4c1ce07bcf62536ed42a4ba43e5fb580be61ac48 Mon Sep 17 00:00:00 2001 From: Uri Mashiach Date: Wed, 30 Dec 2015 15:35:31 +0200 Subject: wlcore/wl12xx: spi: add power operation function The power function uses a consumer regulator access to update the WiFi enable GPIO value. Signed-off-by: Uri Mashiach Tested-By: Sebastian Reichel Reviewed-by: Grygorii Strashko Acked-by: Igor Grinberg Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/spi.c | 39 ++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 44f059f7f34e..dec151247d0d 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "wlcore.h" #include "wl12xx_80211.h" @@ -81,6 +82,7 @@ struct wl12xx_spi_glue { struct device *dev; struct platform_device *core; + struct regulator *reg; /* Power regulator */ }; static void wl12xx_spi_reset(struct device *child) @@ -318,11 +320,40 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr, return 0; } +/** + * wl12xx_spi_set_power - power on/off the wl12xx unit + * @child: wl12xx device handle. + * @enable: true/false to power on/off the unit. + * + * use the WiFi enable regulator to enable/disable the WiFi unit. 
+ */ +static int wl12xx_spi_set_power(struct device *child, bool enable) +{ + int ret = 0; + struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent); + + WARN_ON(!glue->reg); + + /* Update regulator state */ + if (enable) { + ret = regulator_enable(glue->reg); + if (ret) + dev_err(child, "Power enable failure\n"); + } else { + ret = regulator_disable(glue->reg); + if (ret) + dev_err(child, "Power disable failure\n"); + } + + return ret; +} + static struct wl1271_if_operations spi_ops = { .read = wl12xx_spi_raw_read, .write = wl12xx_spi_raw_write, .reset = wl12xx_spi_reset, .init = wl12xx_spi_init, + .power = wl12xx_spi_set_power, .set_block_size = NULL, }; @@ -353,6 +384,14 @@ static int wl1271_probe(struct spi_device *spi) * comes from the board-peripherals file */ spi->bits_per_word = 32; + glue->reg = devm_regulator_get(&spi->dev, "vwlan"); + if (PTR_ERR(glue->reg) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (IS_ERR(glue->reg)) { + dev_err(glue->dev, "can't get regulator\n"); + return PTR_ERR(glue->reg); + } + ret = spi_setup(spi); if (ret < 0) { dev_err(glue->dev, "spi_setup failed\n"); -- cgit v1.2.3 From 04654c386145239c8bcb35878b0b0537ce916766 Mon Sep 17 00:00:00 2001 From: Uri Mashiach Date: Wed, 30 Dec 2015 15:35:32 +0200 Subject: wlcore/wl12xx: spi: add device tree support Add DT support for the wl1271 SPI WiFi. Add documentation file for the wl1271 SPI WiFi. Signed-off-by: Uri Mashiach Acked-by: Rob Herring Tested-By: Sebastian Reichel Reviewed-by: Grygorii Strashko Acked-by: Igor Grinberg Signed-off-by: Kalle Valo --- .../bindings/net/wireless/ti,wlcore,spi.txt | 36 +++++++++++++++++ drivers/net/wireless/ti/wlcore/Kconfig | 2 +- drivers/net/wireless/ti/wlcore/spi.c | 47 ++++++++++++++++++++-- 3 files changed, 80 insertions(+), 5 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt new file mode 100644 index 000000000000..9180724e182c --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt @@ -0,0 +1,36 @@ +* Texas Instruments wl1271 wireless lan controller + +The wl1271 chip can be connected via SPI or via SDIO. This +document describes the binding for the SPI connected chip. + +Required properties: +- compatible : Should be "ti,wl1271" +- reg : Chip select address of device +- spi-max-frequency : Maximum SPI clocking speed of device in Hz +- ref-clock-frequency : Reference clock frequency +- interrupt-parent, interrupts : + Should contain parameters for 1 interrupt line. + Interrupt parameters: parent, line number, type. 
+- vwlan-supply : Point the node of the regulator that powers/enable the wl1271 chip + +Optional properties: +- clock-xtal : boolean, clock is generated from XTAL + +- Please consult Documentation/devicetree/bindings/spi/spi-bus.txt + for optional SPI connection related properties, + +Examples: + +&spi1 { + wl1271@1 { + compatible = "ti,wl1271"; + + reg = <1>; + spi-max-frequency = <48000000>; + clock-xtal; + ref-clock-frequency = <38400000>; + interrupt-parent = <&gpio3>; + interrupts = <8 IRQ_TYPE_LEVEL_HIGH>; + vwlan-supply = <&vwlan_fixed>; + }; +}; diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig index 969c9d79bfc8..8a8f1e711384 100644 --- a/drivers/net/wireless/ti/wlcore/Kconfig +++ b/drivers/net/wireless/ti/wlcore/Kconfig @@ -13,7 +13,7 @@ config WLCORE config WLCORE_SPI tristate "TI wlcore SPI support" - depends on WLCORE && SPI_MASTER + depends on WLCORE && SPI_MASTER && OF select CRC7 ---help--- This module adds support for the SPI interface of adapters using diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index dec151247d0d..020ac1a4b408 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include "wlcore.h" @@ -357,6 +358,39 @@ static struct wl1271_if_operations spi_ops = { .set_block_size = NULL, }; +static const struct of_device_id wlcore_spi_of_match_table[] = { + { .compatible = "ti,wl1271" }, + { } +}; +MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table); + +/** + * wlcore_probe_of - DT node parsing. + * @spi: SPI slave device parameters. + * @res: resource parameters. + * @glue: wl12xx SPI bus to slave device glue parameters. + * @pdev_data: wlcore device parameters + */ +static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, + struct wlcore_platdev_data *pdev_data) +{ + struct device_node *dt_node = spi->dev.of_node; + int ret; + + if (of_find_property(dt_node, "clock-xtal", NULL)) + pdev_data->ref_clock_xtal = true; + + ret = of_property_read_u32(dt_node, "ref-clock-frequency", + &pdev_data->ref_clock_freq); + if (IS_ERR_VALUE(ret)) { + dev_err(glue->dev, + "can't get reference clock frequency (%d)\n", ret); + return ret; + } + + return 0; +} + static int wl1271_probe(struct spi_device *spi) { struct wl12xx_spi_glue *glue; @@ -366,8 +400,6 @@ static int wl1271_probe(struct spi_device *spi) memset(&pdev_data, 0x00, sizeof(pdev_data)); - /* TODO: add DT parsing when needed */ - pdev_data.if_ops = &spi_ops; glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL); @@ -392,6 +424,13 @@ static int wl1271_probe(struct spi_device *spi) return PTR_ERR(glue->reg); } + ret = wlcore_probe_of(spi, glue, &pdev_data); + if (IS_ERR_VALUE(ret)) { + dev_err(glue->dev, + "can't get device tree parameters (%d)\n", ret); + return ret; + } + ret = spi_setup(spi); if (ret < 0) { dev_err(glue->dev, "spi_setup failed\n"); @@ -409,7 +448,7 @@ static int wl1271_probe(struct spi_device *spi) memset(res, 0x00, sizeof(res)); res[0].start = spi->irq; - res[0].flags = IORESOURCE_IRQ; + res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq); res[0].name = "irq"; ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); @@ -447,10 +486,10 @@ static int wl1271_remove(struct spi_device *spi) return 0; } - static struct spi_driver wl1271_spi_driver = { .driver = { .name = "wl1271_spi", + .of_match_table = of_match_ptr(wlcore_spi_of_match_table), }, .probe = wl1271_probe, 
-- cgit v1.2.3 From 2fd5c6ed0b4fbf3c2d9314a24c82862ce5254d42 Mon Sep 17 00:00:00 2001 From: chunfan chen Date: Wed, 6 Jan 2016 23:40:47 -0800 Subject: mwifiex: firmware download enhancements The same chip is used by both the WLAN and Bluetooth drivers. During initialisation, each driver needs to check whether the firmware is already active or needs to be freshly downloaded. If one driver has started downloading the firmware, the other finds the winner flag cleared. mwifiex_check_fw_status() checks the firmware status and also checks whether WLAN is the winner for firmware download. Once we detect that the other interface is downloading the firmware, we call this routine again with the maximum poll count to wait until the firmware is ready. This patch splits the routine to avoid checking the winner status unnecessarily multiple times and ensures that correct messages are displayed to the user. The firmware status poll count is also increased to 150. Signed-off-by: Chunfan Chen Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/fw.h | 2 +- drivers/net/wireless/marvell/mwifiex/init.c | 16 +++++++++--- drivers/net/wireless/marvell/mwifiex/main.h | 1 + drivers/net/wireless/marvell/mwifiex/pcie.c | 40 +++++++++++++++++++++------------ drivers/net/wireless/marvell/mwifiex/sdio.c | 33 ++++++++++++++---------- 5 files changed, 57 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index ced7af2be29a..426e76ade30c 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -96,7 +96,7 @@ enum KEY_TYPE_ID { #define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2) #define MAX_POLL_TRIES 100 -#define MAX_FIRMWARE_POLL_TRIES 100 +#define MAX_FIRMWARE_POLL_TRIES 150 #define FIRMWARE_READY_SDIO 0xfedc #define FIRMWARE_READY_PCIE 0xfedcba00 diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c index 6f7876ec31b7..517653b3adab 100644 --- a/drivers/net/wireless/marvell/mwifiex/init.c +++ b/drivers/net/wireless/marvell/mwifiex/init.c @@ -741,8 +741,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter, u32 poll_num = 1; if (adapter->if_ops.check_fw_status) { - adapter->winner = 0; - /* check if firmware is already running */ ret = adapter->if_ops.check_fw_status(adapter, poll_num); if (!ret) { @@ -750,13 +748,23 @@ "WLAN FW already running! Skip FW dnld\n"); return 0; } + } + + /* check if we are the winner for downloading FW */ + if (adapter->if_ops.check_winner_status) { + adapter->winner = 0; + ret = adapter->if_ops.check_winner_status(adapter); poll_num = MAX_FIRMWARE_POLL_TRIES; + if (ret) { + mwifiex_dbg(adapter, MSG, + "WLAN read winner status failed!\n"); + return ret; + } - /* check if we are the winner for downloading FW */ if (!adapter->winner) { mwifiex_dbg(adapter, MSG, - "FW already running! Skip FW dnld\n"); + "WLAN is not the winner!
Skip FW dnld\n"); goto poll_fw; } } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 2f7f478ce04b..c08be798905f 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -791,6 +791,7 @@ struct mwifiex_if_ops { int (*init_if) (struct mwifiex_adapter *); void (*cleanup_if) (struct mwifiex_adapter *); int (*check_fw_status) (struct mwifiex_adapter *, u32); + int (*check_winner_status)(struct mwifiex_adapter *); int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *); int (*register_dev) (struct mwifiex_adapter *); void (*unregister_dev) (struct mwifiex_adapter *); diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 6d0dc40e20e5..f448d7ca4e7c 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2007,14 +2007,12 @@ done: /* * This function checks the firmware status in card. - * - * The winner interface is also determined by this function. */ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { int ret = 0; - u32 firmware_stat, winner_status; + u32 firmware_stat; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; u32 tries; @@ -2054,19 +2052,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) } } - if (ret) { - if (mwifiex_read_reg(adapter, reg->fw_status, - &winner_status)) - ret = -1; - else if (!winner_status) { - mwifiex_dbg(adapter, INFO, - "PCI-E is the winner\n"); - adapter->winner = 1; - } else { - mwifiex_dbg(adapter, ERROR, - "PCI-E is not the winner <%#x,%d>, exit dnld\n", - ret, adapter->winner); - } + return ret; +} + +/* This function checks if WLAN is the winner. + */ +static int +mwifiex_check_winner_status(struct mwifiex_adapter *adapter) +{ + u32 winner = 0; + int ret = 0; + struct pcie_service_card *card = adapter->card; + const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + + if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) { + ret = -1; + } else if (!winner) { + mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n"); + adapter->winner = 1; + } else { + mwifiex_dbg(adapter, ERROR, + "PCI-E is not the winner <%#x,%d>, exit dnld\n", + ret, adapter->winner); } return ret; @@ -2675,6 +2682,7 @@ static struct mwifiex_if_ops pcie_ops = { .init_if = mwifiex_pcie_init, .cleanup_if = mwifiex_pcie_cleanup, .check_fw_status = mwifiex_check_fw_status, + .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 4c8cae682c89..33771d3fd843 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1039,19 +1039,14 @@ done: /* * This function checks the firmware status in card. - * - * The winner interface is also determined by this function. 
*/ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num) { - struct sdio_mmc_card *card = adapter->card; int ret = 0; u16 firmware_stat; u32 tries; - u8 winner_status; - /* Wait for firmware initialization event */ for (tries = 0; tries < poll_num; tries++) { ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat); if (ret) @@ -1065,16 +1060,25 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, } } - if (ret) { - if (mwifiex_read_reg - (adapter, card->reg->status_reg_0, &winner_status)) - winner_status = 0; + return ret; +} + +/* This function checks if WLAN is the winner. + */ +static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter) +{ + int ret = 0; + u8 winner = 0; + struct sdio_mmc_card *card = adapter->card; + + if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner)) + return -1; + + if (winner) + adapter->winner = 0; + else + adapter->winner = 1; - if (winner_status) - adapter->winner = 0; - else - adapter->winner = 1; - } return ret; } @@ -2620,6 +2624,7 @@ static struct mwifiex_if_ops sdio_ops = { .init_if = mwifiex_init_sdio, .cleanup_if = mwifiex_cleanup_sdio, .check_fw_status = mwifiex_check_fw_status, + .check_winner_status = mwifiex_check_winner_status, .prog_fw = mwifiex_prog_fw_w_helper, .register_dev = mwifiex_register_dev, .unregister_dev = mwifiex_unregister_dev, -- cgit v1.2.3 From dc386ce76dedaeeaaf006fceb6ed8cf2e20ff026 Mon Sep 17 00:00:00 2001 From: chunfan chen Date: Wed, 6 Jan 2016 23:40:48 -0800 Subject: mwifiex: fix IBSS data path issue. The port_open flag is not applicable for IBSS mode. IBSS data path was broken when port_open flag was introduced. This patch fixes the problem by correcting the checks. Fixes: 5c8946330abfa4c ("mwifiex: enable traffic only when port is open") Signed-off-by: chunfan chen Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_event.c | 10 ++++++---- drivers/net/wireless/marvell/mwifiex/wmm.c | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index ff3ee9dfbbd5..23bae87d4d3d 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -607,11 +607,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) case EVENT_PS_AWAKE: mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n"); - if (!adapter->pps_uapsd_mode && priv->port_open && + if (!adapter->pps_uapsd_mode && + (priv->port_open || + (priv->bss_mode == NL80211_IFTYPE_ADHOC)) && priv->media_connected && adapter->sleep_period.period) { - adapter->pps_uapsd_mode = true; - mwifiex_dbg(adapter, EVENT, - "event: PPS/UAPSD mode activated\n"); + adapter->pps_uapsd_mode = true; + mwifiex_dbg(adapter, EVENT, + "event: PPS/UAPSD mode activated\n"); } adapter->tx_lock_flag = false; if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) { diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index acccd6734e3b..499e5a741c62 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -475,7 +475,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter) priv = adapter->priv[i]; if (!priv) continue; - if (!priv->port_open) + if (!priv->port_open && + (priv->bss_mode != NL80211_IFTYPE_ADHOC)) continue; if (adapter->if_ops.is_port_ready && 
!adapter->if_ops.is_port_ready(priv)) @@ -1099,7 +1100,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter, priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv; - if (!priv_tmp->port_open || + if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) && + !priv_tmp->port_open) || (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0)) continue; -- cgit v1.2.3 From 17934b6a32b2fdfcc0a9e83d17c780f15627aa30 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 6 Jan 2016 23:40:49 -0800 Subject: mwifiex: add debugfs file to read chip information This patch add 'verext' debugfs item, which can be used to get detailed chip specific information from our firmware. Examples: echo "1" > /sys/kernel/debug/mwifiex/mlan0/verext cat /sys/kernel/debug/mwifiex/mlan0/verext Signed-off-by: Shengzhen Li Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/README | 10 +++++ drivers/net/wireless/marvell/mwifiex/debugfs.c | 51 +++++++++++++++++++++++- drivers/net/wireless/marvell/mwifiex/main.h | 3 +- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 3 +- 4 files changed, 63 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/README b/drivers/net/wireless/marvell/mwifiex/README index 2f0f9b5609d0..24e649b1eb24 100644 --- a/drivers/net/wireless/marvell/mwifiex/README +++ b/drivers/net/wireless/marvell/mwifiex/README @@ -237,4 +237,14 @@ device_dump cat fw_dump +verext + This command is used to get extended firmware version string using + different configuration parameters. + + Usage: + echo "[version_str_sel]" > verext + cat verext + + [version_str_sel]: firmware support several extend version + string cases, include 0/1/10/20/21/99 =============================================================================== diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index 0b9c580af988..df28836a1d11 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -95,8 +95,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf, mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1); - if (!priv->version_str[0]) - mwifiex_get_ver_ext(priv); + mwifiex_get_ver_ext(priv, 0); p += sprintf(p, "driver_name = " "\"mwifiex\"\n"); p += sprintf(p, "driver_version = %s", fmt); @@ -583,6 +582,52 @@ done: return ret; } +/* debugfs verext file write handler. + * This function is called when the 'verext' file is opened for write + */ +static ssize_t +mwifiex_verext_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + int ret; + u32 versionstrsel; + struct mwifiex_private *priv = (void *)file->private_data; + char buf[16]; + + memset(buf, 0, sizeof(buf)); + + if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) + return -EFAULT; + + ret = kstrtou32(buf, 10, &versionstrsel); + if (ret) + return ret; + + priv->versionstrsel = versionstrsel; + + return count; +} + +/* Proc verext file read handler. + * This function is called when the 'verext' file is opened for reading + * This function can be used read driver exteneed verion string. 
+ */ +static ssize_t +mwifiex_verext_read(struct file *file, char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct mwifiex_private *priv = + (struct mwifiex_private *)file->private_data; + char buf[256]; + int ret; + + mwifiex_get_ver_ext(priv, priv->versionstrsel); + ret = snprintf(buf, sizeof(buf), "version string: %s\n", + priv->version_str); + + return simple_read_from_buffer(ubuf, count, ppos, buf, ret); +} + /* Proc memrw file write handler. * This function is called when the 'memrw' file is opened for writing * This function can be used to write to a memory location. @@ -940,6 +985,7 @@ MWIFIEX_DFS_FILE_OPS(histogram); MWIFIEX_DFS_FILE_OPS(debug_mask); MWIFIEX_DFS_FILE_OPS(timeshare_coex); MWIFIEX_DFS_FILE_WRITE_OPS(reset); +MWIFIEX_DFS_FILE_OPS(verext); /* * This function creates the debug FS directory structure and the files. @@ -968,6 +1014,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv) MWIFIEX_DFS_ADD_FILE(debug_mask); MWIFIEX_DFS_ADD_FILE(timeshare_coex); MWIFIEX_DFS_ADD_FILE(reset); + MWIFIEX_DFS_ADD_FILE(verext); } /* diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index c08be798905f..35ab052c8a36 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -616,6 +616,7 @@ struct mwifiex_private { spinlock_t curr_bcn_buf_lock; struct wireless_dev wdev; struct mwifiex_chan_freq_power cfp; + u32 versionstrsel; char version_str[128]; #ifdef CONFIG_DEBUG_FS struct dentry *dfs_dev_dir; @@ -1418,7 +1419,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len); -int mwifiex_get_ver_ext(struct mwifiex_private *priv); +int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel); int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, struct ieee80211_channel *chan, diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 6a4fc5d183cf..210b257aad6b 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -1114,11 +1114,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, * with requisite parameters and calls the IOCTL handler. */ int -mwifiex_get_ver_ext(struct mwifiex_private *priv) +mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel) { struct mwifiex_ver_ext ver_ext; memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext)); + ver_ext.version_str_sel = version_str_sel; if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT, HostCmd_ACT_GEN_GET, 0, &ver_ext, true)) return -1; -- cgit v1.2.3 From 68f37e5d7a2e00306adab033fba6c3042b33e8e1 Mon Sep 17 00:00:00 2001 From: Andrzej Hajda Date: Thu, 7 Jan 2016 14:28:50 +0100 Subject: wlcore: fix error handling in wlcore_event_fw_logger wlcore_read/wlcore_write can return negative values so it should be assigned to signed variable. The problem has been detected using proposed semantic patch scripts/coccinelle/tests/unsigned_lesser_than_zero.cocci [1]. 
[1]: http://permalink.gmane.org/gmane.linux.kernel/2120705 Fixes: 3719c17e1816 ("wlcore/wl18xx: fw logger over sdio") Signed-off-by: Andrzej Hajda Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/event.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index c96405498bf4..4b59f67724de 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -38,7 +38,7 @@ int wlcore_event_fw_logger(struct wl1271 *wl) { - u32 ret; + int ret; struct fw_logger_information fw_log; u8 *buffer; u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS; -- cgit v1.2.3 From 99074fc1e67b374b5c72406a23ac01fed806d634 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Mon, 11 Jan 2016 02:16:40 -0800 Subject: mwifiex: enable pcie MSIx interrupt mode support Newer pcie chipsets (8997 onwards) support MSIx. This patch enables it. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 187 +++++++++++++++++++++++----- drivers/net/wireless/marvell/mwifiex/pcie.h | 17 +++ 2 files changed, 176 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index f448d7ca4e7c..5e154649c6a2 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2082,20 +2082,28 @@ mwifiex_check_winner_status(struct mwifiex_adapter *adapter) /* * This function reads the interrupt status from card. */ -static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) +static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, + int msg_id) { u32 pcie_ireg; unsigned long flags; + struct pcie_service_card *card = adapter->card; if (!mwifiex_pcie_ok_to_access_hw(adapter)) return; - if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) { - mwifiex_dbg(adapter, ERROR, "Read register failed\n"); - return; - } + if (card->msix_enable && msg_id >= 0) { + pcie_ireg = BIT(msg_id); + } else { + if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, + &pcie_ireg)) { + mwifiex_dbg(adapter, ERROR, "Read register failed\n"); + return; + } + + if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg) + return; - if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) { mwifiex_pcie_disable_host_int(adapter); @@ -2106,20 +2114,23 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) "Write register failed\n"); return; } - spin_lock_irqsave(&adapter->int_lock, flags); - adapter->int_status |= pcie_ireg; - spin_unlock_irqrestore(&adapter->int_lock, flags); - - if (!adapter->pps_uapsd_mode && - adapter->ps_state == PS_STATE_SLEEP && - mwifiex_pcie_ok_to_access_hw(adapter)) { - /* Potentially for PCIe we could get other - * interrupts like shared. Don't change power - * state until cookie is set */ - adapter->ps_state = PS_STATE_AWAKE; - adapter->pm_wakeup_fw_try = false; - del_timer(&adapter->wakeup_timer); - } + } + + spin_lock_irqsave(&adapter->int_lock, flags); + adapter->int_status |= pcie_ireg; + spin_unlock_irqrestore(&adapter->int_lock, flags); + mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg); + + if (!adapter->pps_uapsd_mode && + adapter->ps_state == PS_STATE_SLEEP && + mwifiex_pcie_ok_to_access_hw(adapter)) { + /* Potentially for PCIe we could get other + * interrupts like shared. 
Don't change power + * state until cookie is set + */ + adapter->ps_state = PS_STATE_AWAKE; + adapter->pm_wakeup_fw_try = false; + del_timer(&adapter->wakeup_timer); } } @@ -2131,7 +2142,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) */ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) { - struct pci_dev *pdev = (struct pci_dev *)context; + struct mwifiex_msix_context *ctx = context; + struct pci_dev *pdev = ctx->dev; struct pcie_service_card *card; struct mwifiex_adapter *adapter; @@ -2151,7 +2163,11 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context) if (adapter->surprise_removed) goto exit; - mwifiex_interrupt_status(adapter); + if (card->msix_enable) + mwifiex_interrupt_status(adapter, ctx->msg_id); + else + mwifiex_interrupt_status(adapter, -1); + mwifiex_queue_main_work(adapter); exit: @@ -2171,7 +2187,7 @@ exit: * In case of Rx packets received, the packets are uploaded from card to * host and processed accordingly. */ -static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) +static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) { int ret; u32 pcie_ireg; @@ -2251,6 +2267,69 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) return 0; } +static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter) +{ + int ret; + u32 pcie_ireg; + unsigned long flags; + + spin_lock_irqsave(&adapter->int_lock, flags); + /* Clear out unused interrupts */ + pcie_ireg = adapter->int_status; + adapter->int_status = 0; + spin_unlock_irqrestore(&adapter->int_lock, flags); + + if (pcie_ireg & HOST_INTR_DNLD_DONE) { + mwifiex_dbg(adapter, INTR, + "info: TX DNLD Done\n"); + ret = mwifiex_pcie_send_data_complete(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_UPLD_RDY) { + mwifiex_dbg(adapter, INTR, + "info: Rx DATA\n"); + ret = mwifiex_pcie_process_recv_data(adapter); + if (ret) + return ret; + } + if (pcie_ireg & HOST_INTR_EVENT_RDY) { + mwifiex_dbg(adapter, INTR, + "info: Rx EVENT\n"); + ret = mwifiex_pcie_process_event_ready(adapter); + if (ret) + return ret; + } + + if (pcie_ireg & HOST_INTR_CMD_DONE) { + if (adapter->cmd_sent) { + mwifiex_dbg(adapter, INTR, + "info: CMD sent Interrupt\n"); + adapter->cmd_sent = false; + } + /* Handle command response */ + ret = mwifiex_pcie_process_cmd_complete(adapter); + if (ret) + return ret; + } + + mwifiex_dbg(adapter, INTR, + "info: cmd_sent=%d data_sent=%d\n", + adapter->cmd_sent, adapter->data_sent); + + return 0; +} + +static int mwifiex_process_int_status(struct mwifiex_adapter *adapter) +{ + struct pcie_service_card *card = adapter->card; + + if (card->msix_enable) + return mwifiex_process_msix_int(adapter); + else + return mwifiex_process_pcie_int(adapter); +} + /* * This function downloads data from driver to card. 
* @@ -2602,10 +2681,43 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter) static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) { - int ret; + int ret, i, j; struct pcie_service_card *card = adapter->card; struct pci_dev *pdev = card->dev; + if (card->pcie.reg->msix_support) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + card->msix_entries[i].entry = i; + ret = pci_enable_msix_exact(pdev, card->msix_entries, + MWIFIEX_NUM_MSIX_VECTORS); + if (!ret) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) { + card->msix_ctx[i].dev = pdev; + card->msix_ctx[i].msg_id = i; + + ret = request_irq(card->msix_entries[i].vector, + mwifiex_pcie_interrupt, 0, + "MWIFIEX_PCIE_MSIX", + &card->msix_ctx[i]); + if (ret) + break; + } + + if (ret) { + mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n", + ret); + for (j = 0; j < i; j++) + free_irq(card->msix_entries[j].vector, + &card->msix_ctx[i]); + pci_disable_msix(pdev); + } else { + mwifiex_dbg(adapter, MSG, "MSIx enabled!"); + card->msix_enable = 1; + return 0; + } + } + } + if (pci_enable_msi(pdev) != 0) pci_disable_msi(pdev); else @@ -2613,8 +2725,10 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter) mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable); + card->share_irq_ctx.dev = pdev; + card->share_irq_ctx.msg_id = -1; ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED, - "MRVL_PCIE", pdev); + "MRVL_PCIE", &card->share_irq_ctx); if (ret) { pr_err("request_irq failed: ret=%d\n", ret); adapter->card = NULL; @@ -2660,11 +2774,28 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter) { struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg; + struct pci_dev *pdev = card->dev; + int i; if (card) { - mwifiex_dbg(adapter, INFO, - "%s(): calling free_irq()\n", __func__); - free_irq(card->dev->irq, card->dev); + if (card->msix_enable) { + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + synchronize_irq(card->msix_entries[i].vector); + + for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) + free_irq(card->msix_entries[i].vector, + &card->msix_ctx[i]); + + card->msix_enable = 0; + pci_disable_msix(pdev); + } else { + mwifiex_dbg(adapter, INFO, + "%s(): calling free_irq()\n", __func__); + free_irq(card->dev->irq, &card->share_irq_ctx); + + if (card->msi_enable) + pci_disable_msi(pdev); + } reg = card->pcie.reg; if (reg->sleep_cookie) diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 6fc28737b576..9700ac355e55 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -135,6 +135,7 @@ struct mwifiex_pcie_card_reg { u16 fw_dump_ctrl; u16 fw_dump_start; u16 fw_dump_end; + u8 msix_support; }; static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { @@ -166,6 +167,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = { .ring_tx_start_ptr = 0, .pfu_enabled = 0, .sleep_cookie = 1, + .msix_support = 0, }; static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { @@ -200,6 +202,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { .fw_dump_ctrl = 0xcf4, .fw_dump_start = 0xcf8, .fw_dump_end = 0xcff, + .msix_support = 0, }; static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { @@ -231,6 +234,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, .pfu_enabled = 1, .sleep_cookie = 0, + .msix_support = 1, }; struct mwifiex_pcie_device 
{ @@ -290,6 +294,13 @@ struct mwifiex_pfu_buf_desc { u32 reserved; } __packed; +#define MWIFIEX_NUM_MSIX_VECTORS 4 + +struct mwifiex_msix_context { + struct pci_dev *dev; + u16 msg_id; +}; + struct pcie_service_card { struct pci_dev *dev; struct mwifiex_adapter *adapter; @@ -327,6 +338,12 @@ struct pcie_service_card { void __iomem *pci_mmap; void __iomem *pci_mmap1; int msi_enable; + int msix_enable; +#ifdef CONFIG_PCI + struct msix_entry msix_entries[MWIFIEX_NUM_MSIX_VECTORS]; +#endif + struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS]; + struct mwifiex_msix_context share_irq_ctx; }; static inline int -- cgit v1.2.3 From dc896b15fe94345912ff13fa8eb581b5c6acccde Mon Sep 17 00:00:00 2001 From: Shengzhen Li Date: Tue, 12 Jan 2016 05:43:16 -0800 Subject: mwifiex: fix power state out of sync problem It's been observed that driver's power state goes out of sync with firmware in some corner cases. When this happens, driver tries to download the data to firmware assuming it's awake which causes Tx data timeout. Main thread will process interrupt as soon as interrupt handler fills 'int_status' variable. This patch fixes the race issue by updating 'int_status' at the end of mwifiex_interrupt_status(). Signed-off-by: Shengzhen Li Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 5e154649c6a2..918e04954afe 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2116,11 +2116,6 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, } } - spin_lock_irqsave(&adapter->int_lock, flags); - adapter->int_status |= pcie_ireg; - spin_unlock_irqrestore(&adapter->int_lock, flags); - mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg); - if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP && mwifiex_pcie_ok_to_access_hw(adapter)) { @@ -2132,6 +2127,11 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter, adapter->pm_wakeup_fw_try = false; del_timer(&adapter->wakeup_timer); } + + spin_lock_irqsave(&adapter->int_lock, flags); + adapter->int_status |= pcie_ireg; + spin_unlock_irqrestore(&adapter->int_lock, flags); + mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg); } /* -- cgit v1.2.3 From 0c9b7f22e8e1f3aa5b88d7530db8b3a7d647adb6 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Wed, 13 Jan 2016 01:26:52 -0800 Subject: mwifiex: add schedule scan support This patch add sched scan support for mwifiex, include cfg80211 sched_scan_start/sched_scan_stop handler, corresponding bgscan command path and event handler. 
Signed-off-by: Xinming Hu Signed-off-by: chunfan chen Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 134 +++++++++++ drivers/net/wireless/marvell/mwifiex/fw.h | 59 +++++ drivers/net/wireless/marvell/mwifiex/ioctl.h | 1 + drivers/net/wireless/marvell/mwifiex/main.c | 7 + drivers/net/wireless/marvell/mwifiex/main.h | 10 + drivers/net/wireless/marvell/mwifiex/scan.c | 261 +++++++++++++++++++++ drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 4 + drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 3 + drivers/net/wireless/marvell/mwifiex/sta_event.c | 7 + drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 7 + 10 files changed, 493 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index e7adef72c05f..c27c6cc5f93e 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1962,6 +1962,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; @@ -2217,6 +2220,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, "info: Trying to associate to %s and bssid %pM\n", (char *)sme->ssid, sme->bssid); + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); if (!ret) { @@ -2420,6 +2426,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return -EBUSY; } + if (!mwifiex_stop_bg_scan(priv)) + cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy); + user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL); if (!user_scan_cfg) return -ENOMEM; @@ -2487,6 +2496,121 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, return 0; } +/* CFG802.11 operation handler for sched_scan_start. + * + * This function issues a bgscan config request to the firmware based upon + * the user specified sched_scan configuration. On successful completion, + * firmware will generate BGSCAN_REPORT event, driver should issue bgscan + * query command to get sched_scan results from firmware. 
+ */ +static int +mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy, + struct net_device *dev, + struct cfg80211_sched_scan_request *request) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + int i, offset; + struct ieee80211_channel *chan; + struct mwifiex_bg_scan_cfg *bgscan_cfg; + struct ieee_types_header *ie; + + if (!request || (!request->n_ssids && !request->n_match_sets)) { + wiphy_err(wiphy, "%s : Invalid Sched_scan parameters", + __func__); + return -EINVAL; + } + + wiphy_info(wiphy, "sched_scan start : n_ssids=%d n_match_sets=%d ", + request->n_ssids, request->n_match_sets); + wiphy_info(wiphy, "n_channels=%d interval=%d ie_len=%d\n", + request->n_channels, request->scan_plans->interval, + (int)request->ie_len); + + bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL); + if (!bgscan_cfg) + return -ENOMEM; + + if (priv->scan_request || priv->scan_aborting) + bgscan_cfg->start_later = true; + + bgscan_cfg->num_ssids = request->n_match_sets; + bgscan_cfg->ssid_list = request->match_sets; + + if (request->ie && request->ie_len) { + offset = 0; + for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) { + if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR) + continue; + priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_BGSCAN; + ie = (struct ieee_types_header *)(request->ie + offset); + memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len); + offset += sizeof(*ie) + ie->len; + + if (offset >= request->ie_len) + break; + } + } + + for (i = 0; i < min_t(u32, request->n_channels, + MWIFIEX_BG_SCAN_CHAN_MAX); i++) { + chan = request->channels[i]; + bgscan_cfg->chan_list[i].chan_number = chan->hw_value; + bgscan_cfg->chan_list[i].radio_type = chan->band; + + if ((chan->flags & IEEE80211_CHAN_NO_IR) || !request->n_ssids) + bgscan_cfg->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_PASSIVE; + else + bgscan_cfg->chan_list[i].scan_type = + MWIFIEX_SCAN_TYPE_ACTIVE; + + bgscan_cfg->chan_list[i].scan_time = 0; + } + + bgscan_cfg->chan_per_scan = min_t(u32, request->n_channels, + MWIFIEX_BG_SCAN_CHAN_MAX); + + /* Use at least 15 second for per scan cycle */ + bgscan_cfg->scan_interval = (request->scan_plans->interval > + MWIFIEX_BGSCAN_INTERVAL) ? + request->scan_plans->interval : + MWIFIEX_BGSCAN_INTERVAL; + + bgscan_cfg->repeat_count = MWIFIEX_BGSCAN_REPEAT_COUNT; + bgscan_cfg->report_condition = MWIFIEX_BGSCAN_SSID_MATCH | + MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE; + bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; + bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; + bgscan_cfg->enable = true; + + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, + HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { + kfree(bgscan_cfg); + return -EFAULT; + } + + priv->sched_scanning = true; + + kfree(bgscan_cfg); + return 0; +} + +/* CFG802.11 operation handler for sched_scan_stop. 
+ * + * This function issues a bgscan config command to disable + * previous bgscan configuration in the firmware + */ +static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy, + struct net_device *dev) +{ + struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); + + wiphy_info(wiphy, "sched scan stop!"); + mwifiex_stop_bg_scan(priv); + + return 0; +} + static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info, struct mwifiex_private *priv) { @@ -2848,6 +2972,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) mwifiex_dev_debugfs_remove(priv); #endif + if (priv->sched_scanning) + priv->sched_scanning = false; + mwifiex_stop_net_dev_queue(priv->netdev, adapter); skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) @@ -3701,6 +3828,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, .set_antenna = mwifiex_cfg80211_set_antenna, .del_station = mwifiex_cfg80211_del_station, + .sched_scan_start = mwifiex_cfg80211_sched_scan_start, + .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop, #ifdef CONFIG_PM .suspend = mwifiex_cfg80211_suspend, .resume = mwifiex_cfg80211_resume, @@ -3829,6 +3958,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME | WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD | WIPHY_FLAG_AP_UAPSD | + WIPHY_FLAG_SUPPORTS_SCHED_SCAN | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_HAS_CHANNEL_SWITCH | WIPHY_FLAG_PS_ON_BY_DEFAULT; @@ -3847,6 +3977,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter) NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; + wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH; + wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN; + wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH; + wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1; wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1; diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 426e76ade30c..07bdc2a96b8a 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -144,6 +144,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) #define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) #define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22) +#define TLV_TYPE_BGSCAN_START_LATER (PROPRIETARY_TLV_BASE_ID + 30) #define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) #define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32) #define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35) @@ -177,6 +178,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define TLV_TYPE_TX_PAUSE (PROPRIETARY_TLV_BASE_ID + 148) #define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154) #define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156) +#define TLV_TYPE_REPEAT_COUNT (PROPRIETARY_TLV_BASE_ID + 176) #define TLV_TYPE_MULTI_CHAN_INFO (PROPRIETARY_TLV_BASE_ID + 183) #define TLV_TYPE_MC_GROUP_INFO (PROPRIETARY_TLV_BASE_ID + 184) #define TLV_TYPE_TDLS_IDLE_TIMEOUT (PROPRIETARY_TLV_BASE_ID + 194) @@ -331,6 +333,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_802_11_MAC_ADDRESS 0x004D #define HostCmd_CMD_802_11D_DOMAIN_INFO 0x005b #define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e +#define HostCmd_CMD_802_11_BG_SCAN_CONFIG 0x006b #define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c #define HostCmd_CMD_WMM_GET_STATUS 0x0071 #define 
HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075 @@ -523,6 +526,7 @@ enum P2P_MODES { #define EVENT_CHANNEL_REPORT_RDY 0x00000054 #define EVENT_TX_DATA_PAUSE 0x00000055 #define EVENT_EXT_SCAN_REPORT 0x00000058 +#define EVENT_BG_SCAN_STOPPED 0x00000065 #define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f #define EVENT_MULTI_CHAN_INFO 0x0000006a #define EVENT_TX_STATUS_REPORT 0x00000074 @@ -733,6 +737,16 @@ struct mwifiex_ie_types_num_probes { __le16 num_probes; } __packed; +struct mwifiex_ie_types_repeat_count { + struct mwifiex_ie_types_header header; + __le16 repeat_count; +} __packed; + +struct mwifiex_ie_types_bgscan_start_later { + struct mwifiex_ie_types_header header; + __le16 start_later; +} __packed; + struct mwifiex_ie_types_scan_chan_gap { struct mwifiex_ie_types_header header; /* time gap in TUs to be used between two consecutive channels scan */ @@ -1425,6 +1439,36 @@ struct mwifiex_user_scan_cfg { u16 scan_chan_gap; } __packed; +#define MWIFIEX_BG_SCAN_CHAN_MAX 38 +#define MWIFIEX_BSS_MODE_INFRA 1 +#define MWIFIEX_BGSCAN_ACT_GET 0x0000 +#define MWIFIEX_BGSCAN_ACT_SET 0x0001 +#define MWIFIEX_BGSCAN_ACT_SET_ALL 0xff01 +/** ssid match */ +#define MWIFIEX_BGSCAN_SSID_MATCH 0x0001 +/** ssid match and RSSI exceeded */ +#define MWIFIEX_BGSCAN_SSID_RSSI_MATCH 0x0004 +/**wait for all channel scan to complete to report scan result*/ +#define MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE 0x80000000 + +struct mwifiex_bg_scan_cfg { + u16 action; + u8 enable; + u8 bss_type; + u8 chan_per_scan; + u32 scan_interval; + u32 report_condition; + u8 num_probes; + u8 rssi_threshold; + u8 snr_threshold; + u16 repeat_count; + u16 start_later; + struct cfg80211_match_set *ssid_list; + u8 num_ssids; + struct mwifiex_user_scan_chan chan_list[MWIFIEX_BG_SCAN_CHAN_MAX]; + u16 scan_chan_gap; +} __packed; + struct ie_body { u8 grp_key_oui[4]; u8 ptk_cnt[2]; @@ -1470,6 +1514,20 @@ struct mwifiex_ie_types_bss_scan_info { __le64 tsf; } __packed; +struct host_cmd_ds_802_11_bg_scan_config { + __le16 action; + u8 enable; + u8 bss_type; + u8 chan_per_scan; + u8 reserved; + __le16 reserved1; + __le32 scan_interval; + __le32 reserved2; + __le32 report_condition; + __le16 reserved3; + u8 tlv[0]; +} __packed; + struct host_cmd_ds_802_11_bg_scan_query { u8 flush; } __packed; @@ -2124,6 +2182,7 @@ struct host_cmd_ds_command { struct host_cmd_ds_802_11_scan scan; struct host_cmd_ds_802_11_scan_ext ext_scan; struct host_cmd_ds_802_11_scan_rsp scan_resp; + struct host_cmd_ds_802_11_bg_scan_config bg_scan_config; struct host_cmd_ds_802_11_bg_scan_query bg_scan_query; struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp; struct host_cmd_ds_802_11_associate associate; diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 4f0174c64946..6333b163acef 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -414,6 +414,7 @@ struct mwifiex_ds_mef_cfg { #define MWIFIEX_VSIE_MASK_SCAN 0x01 #define MWIFIEX_VSIE_MASK_ASSOC 0x02 #define MWIFIEX_VSIE_MASK_ADHOC 0x04 +#define MWIFIEX_VSIE_MASK_BGSCAN 0x08 enum { MWIFIEX_FUNC_INIT = 1, diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 79c16de8743e..a99b72bbde51 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -746,6 +746,13 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb) mwifiex_queue_main_work(priv->adapter); + if (priv->sched_scanning) { + 
mwifiex_dbg(priv->adapter, INFO, + "aborting bgscan on ndo_stop\n"); + mwifiex_stop_bg_scan(priv); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); + } + return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 35ab052c8a36..5f5bcf8b9fc9 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -198,6 +198,11 @@ do { \ buf, len, false); \ } while (0) +/** Min BGSCAN interval 15 second */ +#define MWIFIEX_BGSCAN_INTERVAL 15000 +/** default repeat count */ +#define MWIFIEX_BGSCAN_REPEAT_COUNT 6 + struct mwifiex_dbg { u32 num_cmd_host_to_card_failure; u32 num_cmd_sleep_cfm_host_to_card_failure; @@ -641,6 +646,7 @@ struct mwifiex_private { u32 mgmt_frame_mask; struct mwifiex_roc_cfg roc_cfg; bool scan_aborting; + u8 sched_scanning; u8 csa_chan; unsigned long csa_expire_time; u8 del_list_idx; @@ -1198,6 +1204,10 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv, struct host_cmd_ds_command *resp); int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv, void *buf); +int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + void *data_buf); +int mwifiex_stop_bg_scan(struct mwifiex_private *priv); /* * This function checks if the queuing is RA based or not. diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index c20017ced566..d4e214385f54 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -547,6 +547,61 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv, return chan_idx; } +/* This function creates a channel list tlv for bgscan config, based + * on region/band information. + */ +static int +mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv, + const struct mwifiex_bg_scan_cfg + *bgscan_cfg_in, + struct mwifiex_chan_scan_param_set + *scan_chan_list) +{ + enum ieee80211_band band; + struct ieee80211_supported_band *sband; + struct ieee80211_channel *ch; + struct mwifiex_adapter *adapter = priv->adapter; + int chan_idx = 0, i; + + for (band = 0; (band < IEEE80211_NUM_BANDS); band++) { + if (!priv->wdev.wiphy->bands[band]) + continue; + + sband = priv->wdev.wiphy->bands[band]; + + for (i = 0; (i < sband->n_channels) ; i++) { + ch = &sband->channels[i]; + if (ch->flags & IEEE80211_CHAN_DISABLED) + continue; + scan_chan_list[chan_idx].radio_type = band; + + if (bgscan_cfg_in->chan_list[0].scan_time) + scan_chan_list[chan_idx].max_scan_time = + cpu_to_le16((u16)bgscan_cfg_in-> + chan_list[0].scan_time); + else if (ch->flags & IEEE80211_CHAN_NO_IR) + scan_chan_list[chan_idx].max_scan_time = + cpu_to_le16(adapter->passive_scan_time); + else + scan_chan_list[chan_idx].max_scan_time = + cpu_to_le16(adapter-> + specific_scan_time); + + if (ch->flags & IEEE80211_CHAN_NO_IR) + scan_chan_list[chan_idx].chan_scan_mode_bitmap + |= MWIFIEX_PASSIVE_SCAN; + else + scan_chan_list[chan_idx].chan_scan_mode_bitmap + &= ~MWIFIEX_PASSIVE_SCAN; + + scan_chan_list[chan_idx].chan_number = + (u32)ch->hw_value; + chan_idx++; + } + } + return chan_idx; +} + /* This function appends rate TLV to scan config command. 
*/ static int mwifiex_append_rate_tlv(struct mwifiex_private *priv, @@ -2155,6 +2210,212 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv, return 0; } +/* This function prepares an background scan config command to be sent + * to the firmware + */ +int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd, + void *data_buf) +{ + struct host_cmd_ds_802_11_bg_scan_config *bgscan_config = + &cmd->params.bg_scan_config; + struct mwifiex_bg_scan_cfg *bgscan_cfg_in = data_buf; + u8 *tlv_pos = bgscan_config->tlv; + u8 num_probes; + u32 ssid_len, chan_idx, scan_type, scan_dur, chan_num; + int i; + struct mwifiex_ie_types_num_probes *num_probes_tlv; + struct mwifiex_ie_types_repeat_count *repeat_count_tlv; + struct mwifiex_ie_types_bgscan_start_later *start_later_tlv; + struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; + struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv; + struct mwifiex_chan_scan_param_set *temp_chan; + + cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG); + cmd->size = cpu_to_le16(sizeof(*bgscan_config) + S_DS_GEN); + + bgscan_config->action = cpu_to_le16(bgscan_cfg_in->action); + bgscan_config->enable = bgscan_cfg_in->enable; + bgscan_config->bss_type = bgscan_cfg_in->bss_type; + bgscan_config->scan_interval = + cpu_to_le32(bgscan_cfg_in->scan_interval); + bgscan_config->report_condition = + cpu_to_le32(bgscan_cfg_in->report_condition); + + /* stop sched scan */ + if (!bgscan_config->enable) + return 0; + + bgscan_config->chan_per_scan = bgscan_cfg_in->chan_per_scan; + + num_probes = (bgscan_cfg_in->num_probes ? bgscan_cfg_in-> + num_probes : priv->adapter->scan_probes); + + if (num_probes) { + num_probes_tlv = (struct mwifiex_ie_types_num_probes *)tlv_pos; + num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES); + num_probes_tlv->header.len = + cpu_to_le16(sizeof(num_probes_tlv->num_probes)); + num_probes_tlv->num_probes = cpu_to_le16((u16)num_probes); + + tlv_pos += sizeof(num_probes_tlv->header) + + le16_to_cpu(num_probes_tlv->header.len); + } + + if (bgscan_cfg_in->repeat_count) { + repeat_count_tlv = + (struct mwifiex_ie_types_repeat_count *)tlv_pos; + repeat_count_tlv->header.type = + cpu_to_le16(TLV_TYPE_REPEAT_COUNT); + repeat_count_tlv->header.len = + cpu_to_le16(sizeof(repeat_count_tlv->repeat_count)); + repeat_count_tlv->repeat_count = + cpu_to_le16(bgscan_cfg_in->repeat_count); + + tlv_pos += sizeof(repeat_count_tlv->header) + + le16_to_cpu(repeat_count_tlv->header.len); + } + + for (i = 0; i < bgscan_cfg_in->num_ssids; i++) { + ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len; + + wildcard_ssid_tlv = + (struct mwifiex_ie_types_wildcard_ssid_params *)tlv_pos; + wildcard_ssid_tlv->header.type = + cpu_to_le16(TLV_TYPE_WILDCARDSSID); + wildcard_ssid_tlv->header.len = cpu_to_le16( + (u16)(ssid_len + sizeof(wildcard_ssid_tlv-> + max_ssid_length))); + + /* max_ssid_length = 0 tells firmware to perform + * specific scan for the SSID filled, whereas + * max_ssid_length = IEEE80211_MAX_SSID_LEN is for + * wildcard scan. 
+ */ + if (ssid_len) + wildcard_ssid_tlv->max_ssid_length = 0; + else + wildcard_ssid_tlv->max_ssid_length = + IEEE80211_MAX_SSID_LEN; + + memcpy(wildcard_ssid_tlv->ssid, + bgscan_cfg_in->ssid_list[i].ssid.ssid, ssid_len); + + tlv_pos += (sizeof(wildcard_ssid_tlv->header) + + le16_to_cpu(wildcard_ssid_tlv->header.len)); + } + + chan_list_tlv = (struct mwifiex_ie_types_chan_list_param_set *)tlv_pos; + + if (bgscan_cfg_in->chan_list[0].chan_number) { + dev_dbg(priv->adapter->dev, "info: bgscan: Using supplied channel list\n"); + + chan_list_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); + + for (chan_idx = 0; + chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX && + bgscan_cfg_in->chan_list[chan_idx].chan_number; + chan_idx++) { + temp_chan = chan_list_tlv->chan_scan_param + chan_idx; + + /* Increment the TLV header length by size appended */ + le16_add_cpu(&chan_list_tlv->header.len, + sizeof(chan_list_tlv->chan_scan_param)); + + temp_chan->chan_number = + bgscan_cfg_in->chan_list[chan_idx].chan_number; + temp_chan->radio_type = + bgscan_cfg_in->chan_list[chan_idx].radio_type; + + scan_type = + bgscan_cfg_in->chan_list[chan_idx].scan_type; + + if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE) + temp_chan->chan_scan_mode_bitmap + |= MWIFIEX_PASSIVE_SCAN; + else + temp_chan->chan_scan_mode_bitmap + &= ~MWIFIEX_PASSIVE_SCAN; + + if (bgscan_cfg_in->chan_list[chan_idx].scan_time) { + scan_dur = (u16)bgscan_cfg_in-> + chan_list[chan_idx].scan_time; + } else { + scan_dur = (scan_type == + MWIFIEX_SCAN_TYPE_PASSIVE) ? + priv->adapter->passive_scan_time : + priv->adapter->specific_scan_time; + } + + temp_chan->min_scan_time = cpu_to_le16(scan_dur); + temp_chan->max_scan_time = cpu_to_le16(scan_dur); + } + } else { + dev_dbg(priv->adapter->dev, + "info: bgscan: Creating full region channel list\n"); + chan_num = + mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in, + chan_list_tlv-> + chan_scan_param); + le16_add_cpu(&chan_list_tlv->header.len, + chan_num * + sizeof(chan_list_tlv->chan_scan_param[0])); + } + + tlv_pos += (sizeof(chan_list_tlv->header) + + le16_to_cpu(chan_list_tlv->header.len)); + + if (bgscan_cfg_in->start_later) { + start_later_tlv = + (struct mwifiex_ie_types_bgscan_start_later *)tlv_pos; + start_later_tlv->header.type = + cpu_to_le16(TLV_TYPE_BGSCAN_START_LATER); + start_later_tlv->header.len = + cpu_to_le16(sizeof(start_later_tlv->start_later)); + start_later_tlv->start_later = + cpu_to_le16(bgscan_cfg_in->start_later); + + tlv_pos += sizeof(start_later_tlv->header) + + le16_to_cpu(start_later_tlv->header.len); + } + + /* Append vendor specific IE TLV */ + mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos); + + le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv); + + return 0; +} + +int mwifiex_stop_bg_scan(struct mwifiex_private *priv) +{ + struct mwifiex_bg_scan_cfg *bgscan_cfg; + + if (!priv->sched_scanning) { + dev_dbg(priv->adapter->dev, "bgscan already stopped!\n"); + return 0; + } + + bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL); + if (!bgscan_cfg) + return -ENOMEM; + + bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; + bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; + bgscan_cfg->enable = false; + + if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, + HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { + kfree(bgscan_cfg); + return -EFAULT; + } + + kfree(bgscan_cfg); + priv->sched_scanning = false; + + return 0; +} + static void mwifiex_update_chan_statistics(struct mwifiex_private *priv, struct mwifiex_ietypes_chanstats *tlv_stat) diff --git 
a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index e486867a4c67..60f3ded747c9 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1873,6 +1873,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, case HostCmd_CMD_802_11_SCAN: ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf); break; + case HostCmd_CMD_802_11_BG_SCAN_CONFIG: + ret = mwifiex_cmd_802_11_bg_scan_config(priv, cmd_ptr, + data_buf); + break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr); break; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 9ac7aa2431b4..4b23d3b95a20 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1076,9 +1076,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, break; case HostCmd_CMD_802_11_BG_SCAN_QUERY: ret = mwifiex_ret_802_11_scan(priv, resp); + cfg80211_sched_scan_results(priv->wdev.wiphy); mwifiex_dbg(adapter, CMD, "info: CMD_RESP: BG_SCAN result is ready!\n"); break; + case HostCmd_CMD_802_11_BG_SCAN_CONFIG: + break; case HostCmd_CMD_TXPWR_CFG: ret = mwifiex_ret_tx_power_cfg(priv, resp); break; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index 23bae87d4d3d..fd8061c73091 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -688,6 +688,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv) HostCmd_ACT_GEN_GET, 0, NULL, false); break; + case EVENT_BG_SCAN_STOPPED: + dev_dbg(adapter->dev, "event: BGS_STOPPED\n"); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); + if (priv->sched_scanning) + priv->sched_scanning = false; + break; + case EVENT_PORT_RELEASE: mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n"); priv->port_open = true; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 210b257aad6b..2cc1a32691bd 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -504,6 +504,13 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) } } + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + if (priv && priv->sched_scanning) { + dev_dbg(adapter->dev, "aborting bgscan!\n"); + mwifiex_stop_bg_scan(priv); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); + } + if (adapter->hs_activated) { mwifiex_dbg(adapter, CMD, "cmd: HS Already activated\n"); -- cgit v1.2.3 From 5323b53d80fc17604b2c60b725af116827330b2c Mon Sep 17 00:00:00 2001 From: chunfan chen Date: Wed, 13 Jan 2016 01:26:53 -0800 Subject: mwifiex: add wowlan info messages This patch adds informative messages in wake up on magic packet, disconnect, pattern configuration paths. 
Signed-off-by: chunfan chen Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index c27c6cc5f93e..6d36d081e953 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3171,10 +3171,12 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, sizeof(byte_seq)); mef_entry->filter[filt_num].filt_type = TYPE_EQ; - if (first_pat) + if (first_pat) { first_pat = false; - else + mwifiex_dbg(priv->adapter, INFO, "Wake on patterns\n"); + } else { mef_entry->filter[filt_num].filt_action = TYPE_AND; + } filt_num++; } @@ -3200,6 +3202,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv, mef_entry->filter[filt_num].offset = 56; mef_entry->filter[filt_num].filt_type = TYPE_EQ; mef_entry->filter[filt_num].filt_action = TYPE_OR; + mwifiex_dbg(priv->adapter, INFO, "Wake on magic packet\n"); } return ret; } @@ -3295,6 +3298,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, "Failed to set HS params\n"); return ret; } + mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n"); } return ret; -- cgit v1.2.3 From 7d7f07d8c5d35ebfb207650b0dbcf437dad76bab Mon Sep 17 00:00:00 2001 From: chunfan chen Date: Wed, 13 Jan 2016 01:26:54 -0800 Subject: mwifiex: add wowlan net-detect support This patch adds support for wakeup when configured network is detected. Signed-off-by: chunfan chen Signed-off-by: Amitkumar Karwar Signed-off-by: Xinming Hu Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 40 +++++++++++++++--------- drivers/net/wireless/marvell/mwifiex/fw.h | 2 ++ drivers/net/wireless/marvell/mwifiex/main.c | 7 +++++ drivers/net/wireless/marvell/mwifiex/main.h | 1 + drivers/net/wireless/marvell/mwifiex/scan.c | 34 ++++++++++++++++++++ drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 13 ++++++-- 6 files changed, 80 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 6d36d081e953..a1b8d89d399f 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3273,7 +3273,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); - if (!priv->media_connected) { + if (!priv->media_connected && !wowlan->nd_config) { mwifiex_dbg(adapter, ERROR, "Can not configure WOWLAN in disconnected state\n"); return 0; @@ -3285,22 +3285,32 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, return ret; } + memset(&hs_cfg, 0, sizeof(hs_cfg)); + hs_cfg.conditions = le32_to_cpu(adapter->hs_cfg.conditions); + + if (wowlan->nd_config) { + mwifiex_dbg(adapter, INFO, "Wake on net detect\n"); + hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; + mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev, + wowlan->nd_config); + } + if (wowlan->disconnect) { - memset(&hs_cfg, 0, sizeof(hs_cfg)); - hs_cfg.is_invoke_hostcmd = false; - hs_cfg.conditions = HS_CFG_COND_MAC_EVENT; - hs_cfg.gpio = adapter->hs_cfg.gpio; - hs_cfg.gap = adapter->hs_cfg.gap; - ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, - MWIFIEX_SYNC_CMD, &hs_cfg); - if (ret) { - mwifiex_dbg(adapter, ERROR, - "Failed to set HS params\n"); - return ret; - 
} + hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT; mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n"); } + hs_cfg.is_invoke_hostcmd = false; + hs_cfg.gpio = adapter->hs_cfg.gpio; + hs_cfg.gap = adapter->hs_cfg.gap; + ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, + MWIFIEX_SYNC_CMD, &hs_cfg); + if (ret) { + mwifiex_dbg(adapter, ERROR, + "Failed to set HS params\n"); + return ret; + } + return ret; } @@ -3853,11 +3863,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = { #ifdef CONFIG_PM static const struct wiphy_wowlan_support mwifiex_wowlan_support = { - .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT, + .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_NET_DETECT, .n_patterns = MWIFIEX_MEF_MAX_FILTERS, .pattern_min_len = 1, .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN, .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN, + .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS, }; #endif diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 07bdc2a96b8a..81b491aac343 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -543,6 +543,8 @@ enum P2P_MODES { #define MWIFIEX_MAX_PATTERN_LEN 40 #define MWIFIEX_MAX_OFFSET_LEN 100 +#define MWIFIEX_MAX_ND_MATCH_SETS 10 + #define STACK_NBYTES 100 #define TYPE_DNUM 1 #define TYPE_BYTESEQ 2 diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index a99b72bbde51..3cfa94677a8e 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c @@ -132,6 +132,13 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter) } } + if (adapter->nd_info) { + for (i = 0 ; i < adapter->nd_info->n_matches ; i++) + kfree(adapter->nd_info->matches[i]); + kfree(adapter->nd_info); + adapter->nd_info = NULL; + } + vfree(adapter->chan_stats); kfree(adapter); return 0; diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 5f5bcf8b9fc9..bf9b24ae46fc 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1002,6 +1002,7 @@ struct mwifiex_adapter { u8 active_scan_triggered; bool usb_mc_status; bool usb_mc_setup; + struct cfg80211_wowlan_nd_info *nd_info; }; void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index d4e214385f54..2702bd93f74d 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2092,6 +2092,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, u8 is_bgscan_resp; __le64 fw_tsf = 0; u8 *radio_type; + struct cfg80211_wowlan_nd_match *pmatch; + struct cfg80211_sched_scan_request *nd_config = NULL; is_bgscan_resp = (le16_to_cpu(resp->command) == HostCmd_CMD_802_11_BG_SCAN_QUERY); @@ -2154,6 +2156,21 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, (struct mwifiex_ie_types_data **) &chan_band_tlv); +#ifdef CONFIG_PM + if (priv->wdev.wiphy->wowlan_config) + nd_config = priv->wdev.wiphy->wowlan_config->nd_config; +#endif + + if (nd_config) { + adapter->nd_info = + kzalloc(sizeof(struct cfg80211_wowlan_nd_match) + + sizeof(struct cfg80211_wowlan_nd_match *) * + scan_rsp->number_of_sets, GFP_ATOMIC); + + if (adapter->nd_info) + adapter->nd_info->n_matches = scan_rsp->number_of_sets; + } + for (idx = 0; idx < scan_rsp->number_of_sets && 
bytes_left; idx++) { /* * If the TSF TLV was appended to the scan results, save this @@ -2172,6 +2189,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, radio_type = NULL; } + if (chan_band_tlv && adapter->nd_info) { + adapter->nd_info->matches[idx] = + kzalloc(sizeof(*pmatch) + + sizeof(u32), GFP_ATOMIC); + + pmatch = adapter->nd_info->matches[idx]; + + if (!pmatch) { + memset(pmatch, 0, sizeof(*pmatch)); + if (chan_band_tlv) { + pmatch->n_channels = 1; + pmatch->channels[0] = + chan_band->chan_number; + } + } + } + ret = mwifiex_parse_single_response_buf(priv, &bss_info, &bytes_left, le64_to_cpu(fw_tsf), diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 2cc1a32691bd..7277c24ee9aa 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -505,10 +505,17 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) } priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + if (priv && priv->sched_scanning) { - dev_dbg(adapter->dev, "aborting bgscan!\n"); - mwifiex_stop_bg_scan(priv); - cfg80211_sched_scan_stopped(priv->wdev.wiphy); +#ifdef CONFIG_PM + if (!priv->wdev.wiphy->wowlan_config->nd_config) { +#endif + mwifiex_dbg(adapter, CMD, "aborting bgscan!\n"); + mwifiex_stop_bg_scan(priv); + cfg80211_sched_scan_stopped(priv->wdev.wiphy); +#ifdef CONFIG_PM + } +#endif } if (adapter->hs_activated) { -- cgit v1.2.3 From 8de00f1b1c7f37e523430efba8e4257b2761df7c Mon Sep 17 00:00:00 2001 From: chunfan chen Date: Wed, 13 Jan 2016 01:26:55 -0800 Subject: mwifiex: report wakeup reason to cfg80211 This patch adds code to report wakeup reason to cfg80211 when system is resumed. Signed-off-by: chunfan chen Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 58 ++++++++++++++++++++++ drivers/net/wireless/marvell/mwifiex/cmdevt.c | 13 +++++ drivers/net/wireless/marvell/mwifiex/fw.h | 20 ++++++++ drivers/net/wireless/marvell/mwifiex/ioctl.h | 4 ++ drivers/net/wireless/marvell/mwifiex/main.h | 6 +++ drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 19 +++++++ drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 3 ++ drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 16 ++++++ 8 files changed, 139 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index a1b8d89d399f..c80d9e2daa16 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3316,6 +3316,64 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, static int mwifiex_cfg80211_resume(struct wiphy *wiphy) { + struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); + struct mwifiex_private *priv = + mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + struct mwifiex_ds_wakeup_reason wakeup_reason; + struct cfg80211_wowlan_wakeup wakeup_report; + int i; + + mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD, + &wakeup_reason); + memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup)); + + wakeup_report.pattern_idx = -1; + + switch (wakeup_reason.hs_wakeup_reason) { + case NO_HSWAKEUP_REASON: + break; + case BCAST_DATA_MATCHED: + break; + case MCAST_DATA_MATCHED: + break; + case UCAST_DATA_MATCHED: + break; + case MASKTABLE_EVENT_MATCHED: + break; + case NON_MASKABLE_EVENT_MATCHED: + if (wiphy->wowlan_config->disconnect) + wakeup_report.disconnect = true; + 
if (wiphy->wowlan_config->nd_config) + wakeup_report.net_detect = adapter->nd_info; + break; + case NON_MASKABLE_CONDITION_MATCHED: + break; + case MAGIC_PATTERN_MATCHED: + if (wiphy->wowlan_config->magic_pkt) + wakeup_report.magic_pkt = true; + if (wiphy->wowlan_config->n_patterns) + wakeup_report.pattern_idx = 1; + break; + case CONTROL_FRAME_MATCHED: + break; + case MANAGEMENT_FRAME_MATCHED: + break; + default: + break; + } + + if ((wakeup_reason.hs_wakeup_reason > 0) && + (wakeup_reason.hs_wakeup_reason <= 7)) + cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report, + GFP_KERNEL); + + if (adapter->nd_info) { + for (i = 0 ; i < adapter->nd_info->n_matches ; i++) + kfree(adapter->nd_info->matches[i]); + kfree(adapter->nd_info); + adapter->nd_info = NULL; + } + return 0; } diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c index cb25aa7e90db..a12adee776c6 100644 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c @@ -1657,3 +1657,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv, return 0; } + +/* This function handles the command response of hs wakeup reason + * command. + */ +int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + struct host_cmd_ds_wakeup_reason *wakeup_reason) +{ + wakeup_reason->wakeup_reason = + resp->params.hs_wakeup_reason.wakeup_reason; + + return 0; +} diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 81b491aac343..d293e485189b 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -373,6 +373,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER { #define HostCmd_CMD_MGMT_FRAME_REG 0x010c #define HostCmd_CMD_REMAIN_ON_CHAN 0x010d #define HostCmd_CMD_11AC_CFG 0x0112 +#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116 #define HostCmd_CMD_TDLS_CONFIG 0x0100 #define HostCmd_CMD_MC_POLICY 0x0121 #define HostCmd_CMD_TDLS_OPER 0x0122 @@ -607,6 +608,20 @@ struct mwifiex_ie_types_data { #define MWIFIEX_RXPD_FLAGS_TDLS_PACKET 0x01 #define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS 0x20 +enum HS_WAKEUP_REASON { + NO_HSWAKEUP_REASON = 0, + BCAST_DATA_MATCHED, + MCAST_DATA_MATCHED, + UCAST_DATA_MATCHED, + MASKTABLE_EVENT_MATCHED, + NON_MASKABLE_EVENT_MATCHED, + NON_MASKABLE_CONDITION_MATCHED, + MAGIC_PATTERN_MATCHED, + CONTROL_FRAME_MATCHED, + MANAGEMENT_FRAME_MATCHED, + RESERVED +}; + struct txpd { u8 bss_type; u8 bss_num; @@ -2159,6 +2174,10 @@ struct host_cmd_ds_robust_coex { __le16 reserved; } __packed; +struct host_cmd_ds_wakeup_reason { + u16 wakeup_reason; +} __packed; + struct host_cmd_ds_command { __le16 command; __le16 size; @@ -2231,6 +2250,7 @@ struct host_cmd_ds_command { struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg; struct host_cmd_ds_multi_chan_policy mc_policy; struct host_cmd_ds_robust_coex coex; + struct host_cmd_ds_wakeup_reason hs_wakeup_reason; } params; } __packed; diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 6333b163acef..14cfa37deb00 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -271,6 +271,10 @@ struct mwifiex_ds_hs_cfg { u32 gap; }; +struct mwifiex_ds_wakeup_reason { + u16 hs_wakeup_reason; +}; + #define DEEP_SLEEP_ON 1 #define DEEP_SLEEP_OFF 0 #define DEEP_SLEEP_IDLE_TIME 100 diff --git a/drivers/net/wireless/marvell/mwifiex/main.h 
b/drivers/net/wireless/marvell/mwifiex/main.h index bf9b24ae46fc..c755be54cc83 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1599,6 +1599,12 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter); void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter); void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags); void mwifiex_queue_main_work(struct mwifiex_adapter *adapter); +int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, + int cmd_type, + struct mwifiex_ds_wakeup_reason *wakeup_reason); +int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv, + struct host_cmd_ds_command *resp, + struct host_cmd_ds_wakeup_reason *wakeup_reason); void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter); void mwifiex_11n_delba(struct mwifiex_private *priv, int tid); int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c index 60f3ded747c9..30f152601c57 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c @@ -1813,6 +1813,22 @@ static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd, return 0; } +/* This function prepares command to get HS wakeup reason. + * + * Preparation includes - + * - Setting command ID, action and proper size + * - Ensuring correct endian-ness + */ +static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv, + struct host_cmd_ds_command *cmd) +{ + cmd->command = cpu_to_le16(HostCmd_CMD_HS_WAKEUP_REASON); + cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_wakeup_reason) + + S_DS_GEN); + + return 0; +} + /* * This function prepares the commands before sending them to the firmware. * @@ -2067,6 +2083,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no, ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action, data_buf); break; + case HostCmd_CMD_HS_WAKEUP_REASON: + ret = mwifiex_cmd_get_wakeup_reason(priv, cmd_ptr); + break; case HostCmd_CMD_MC_POLICY: ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action, data_buf); diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 4b23d3b95a20..d96523e10eb4 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -1236,6 +1236,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no, case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG: ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp); break; + case HostCmd_CMD_HS_WAKEUP_REASON: + ret = mwifiex_ret_wakeup_reason(priv, resp, data_buf); + break; case HostCmd_CMD_TDLS_CONFIG: break; case HostCmd_CMD_ROBUST_COEX: diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 7277c24ee9aa..5cbee58f8781 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -1465,3 +1465,19 @@ mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len) return 0; } + +/* This function get Host Sleep wake up reason. 
+ * + */ +int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action, + int cmd_type, + struct mwifiex_ds_wakeup_reason *wakeup_reason) +{ + int status = 0; + + status = mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON, + HostCmd_ACT_GEN_GET, 0, wakeup_reason, + cmd_type == MWIFIEX_SYNC_CMD); + + return status; +} -- cgit v1.2.3 From fdcab083055d759325c8e0f8999d9e192417fc20 Mon Sep 17 00:00:00 2001 From: Ganapathi Bhat Date: Wed, 13 Jan 2016 01:26:56 -0800 Subject: mwifiex: add RSSI support for net-detect This patch adds support for waking up the device on finding better RSSI. Threshold RSSI value will be configured by application. Signed-off-by: Ganapathi Bhat Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 ++++ drivers/net/wireless/marvell/mwifiex/fw.h | 5 +++++ drivers/net/wireless/marvell/mwifiex/scan.c | 15 +++++++++++++++ 3 files changed, 24 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index c80d9e2daa16..84615533986c 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -2582,6 +2582,10 @@ mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy, bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA; bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET; bgscan_cfg->enable = true; + if (request->min_rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) { + bgscan_cfg->report_condition |= MWIFIEX_BGSCAN_SSID_RSSI_MATCH; + bgscan_cfg->rssi_threshold = request->min_rssi_thold; + } if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG, HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) { diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index d293e485189b..4af916817bcd 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -759,6 +759,11 @@ struct mwifiex_ie_types_repeat_count { __le16 repeat_count; } __packed; +struct mwifiex_ie_types_min_rssi_threshold { + struct mwifiex_ie_types_header header; + __le16 rssi_threshold; +} __packed; + struct mwifiex_ie_types_bgscan_start_later { struct mwifiex_ie_types_header header; __le16 start_later; diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index 2702bd93f74d..fc8d8ca67453 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2260,6 +2260,7 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, int i; struct mwifiex_ie_types_num_probes *num_probes_tlv; struct mwifiex_ie_types_repeat_count *repeat_count_tlv; + struct mwifiex_ie_types_min_rssi_threshold *rssi_threshold_tlv; struct mwifiex_ie_types_bgscan_start_later *start_later_tlv; struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv; struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv; @@ -2310,6 +2311,20 @@ int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv, le16_to_cpu(repeat_count_tlv->header.len); } + if (bgscan_cfg_in->rssi_threshold) { + rssi_threshold_tlv = + (struct mwifiex_ie_types_min_rssi_threshold *)tlv_pos; + rssi_threshold_tlv->header.type = + cpu_to_le16(TLV_TYPE_RSSI_LOW); + rssi_threshold_tlv->header.len = + cpu_to_le16(sizeof(rssi_threshold_tlv->rssi_threshold)); + rssi_threshold_tlv->rssi_threshold = + cpu_to_le16(bgscan_cfg_in->rssi_threshold); + + tlv_pos += sizeof(rssi_threshold_tlv->header) + + 
le16_to_cpu(rssi_threshold_tlv->header.len); + } + for (i = 0; i < bgscan_cfg_in->num_ssids; i++) { ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len; -- cgit v1.2.3 From a92277bc3bfe7c41cac13ca4a7d5070033543732 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Wed, 13 Jan 2016 01:26:57 -0800 Subject: mwifiex: use SYNC flag for canceling host sleep Host sleep is cancelled in sdio resume() handler. Cfg80211's resume handler is immediately called after this. SYNC flag here ensures that host sleep handshake gets completed and we have valid "adapter->nd_config" before we report host wakeup reason in cfg80211's resume handler. Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 33771d3fd843..abf15dbdfe08 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev) /* Disable Host Sleep */ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), - MWIFIEX_ASYNC_CMD); + MWIFIEX_SYNC_CMD); return 0; } -- cgit v1.2.3 From 3955525d5d17118ed8900e9e79753057120eb02a Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 14 Jan 2016 09:39:21 +0200 Subject: iwlwifi: pcie: buffer packets to avoid overflowing Tx queues When the Tx queues are full above a threshold, we immediately stop the mac80211's queue to stop getting new packets. This worked until TSO was enabled. With TSO, one single packet from mac80211 can use many descriptors since a large send needs to be split into several segments. This means that stopping mac80211's queues is not enough and we also need to ensure that we don't overflow the Tx queues with one single packet from mac80211. Add code to transport layer to do just that. Stop mac80211's queue as soon as the queue is full above the same threshold as before, and keep pushing the current packet along with its segments on the queue, but check that we don't overflow. If that would happen, buffer the segments, and send them when there is room in the Tx queue again. Of course, we first need to send the buffered segments and only then, wake up mac80211's queues. 
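For illustration, the queue-overflow scheme described above can be modeled with a small standalone sketch: the producer is stopped once the ring crosses a high watermark, segments that no longer fit are parked on a side queue, and that side queue is drained during reclaim before the producer may be woken again. This is a simplified userspace model, not the driver's iwl_txq code; every name in it (ring, overflow_q, tx_stopped, reclaim) is invented for the example, and the drain order is simplified.

/* Simplified model of "stop early, buffer the overflow, drain before waking". */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 8
#define HIGH_MARK 6 /* stop the producer above this fill level */
#define LOW_MARK  2 /* consider waking it again below this level */

static int ring[RING_SIZE], ring_used;
static int overflow_q[64], overflow_used;
static bool tx_stopped;

/* Queue one packet; park it on the overflow queue when the ring is full. */
static void tx(int pkt)
{
	if (ring_used >= HIGH_MARK)
		tx_stopped = true;        /* stop accepting new work upstream */

	if (ring_used >= RING_SIZE) {     /* no room left: buffer the segment */
		overflow_q[overflow_used++] = pkt;
		return;
	}
	ring[ring_used++] = pkt;
}

/* Reclaim completed slots, push buffered segments first, then maybe wake. */
static void reclaim(int done)
{
	ring_used -= done;

	while (overflow_used && ring_used < RING_SIZE)
		ring[ring_used++] = overflow_q[--overflow_used];

	if (!overflow_used && ring_used <= LOW_MARK)
		tx_stopped = false;       /* only wake once the backlog is gone */
}

int main(void)
{
	for (int i = 0; i < 12; i++)
		tx(i);
	printf("ring=%d overflow=%d stopped=%d\n", ring_used, overflow_used, tx_stopped);

	reclaim(6);
	printf("ring=%d overflow=%d stopped=%d\n", ring_used, overflow_used, tx_stopped);
	return 0;
}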
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 1 + drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 69 +++++++++++++++++++--- 2 files changed, 62 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index cc3888e2700d..2d8b415fa55e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -280,6 +280,7 @@ struct iwl_txq { bool ampdu; bool block; unsigned long wd_timeout; + struct sk_buff_head overflow_q; }; static inline dma_addr_t diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 5262028b5505..b0b0fd9e2eff 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -571,6 +571,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, return ret; spin_lock_init(&txq->lock); + __skb_queue_head_init(&txq->overflow_q); /* * Tell nic where to find circular buffer of Tx Frame Descriptors for @@ -621,6 +622,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) q->read_ptr = iwl_queue_inc_wrap(q->read_ptr); } txq->active = false; + + while (!skb_queue_empty(&txq->overflow_q)) { + struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); + + iwl_op_mode_free_skb(trans->op_mode, skb); + } + spin_unlock_bh(&txq->lock); /* just in case - this queue may have been stopped */ @@ -1052,8 +1060,41 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_progress(txq); - if (iwl_queue_space(&txq->q) > txq->q.low_mark) - iwl_wake_queue(trans, txq); + if (iwl_queue_space(&txq->q) > txq->q.low_mark && + test_bit(txq_id, trans_pcie->queue_stopped)) { + struct sk_buff_head skbs; + + __skb_queue_head_init(&skbs); + skb_queue_splice_init(&txq->overflow_q, &skbs); + + /* + * This is tricky: we are in reclaim path which is non + * re-entrant, so noone will try to take the access the + * txq data from that path. We stopped tx, so we can't + * have tx as well. Bottom line, we can unlock and re-lock + * later. + */ + spin_unlock_bh(&txq->lock); + + while (!skb_queue_empty(&skbs)) { + struct sk_buff *skb = __skb_dequeue(&skbs); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; + struct iwl_device_cmd *dev_cmd = + info->driver_data[dev_cmd_idx]; + + /* + * Note that we can very well be overflowing again. + * In that case, iwl_queue_space will be small again + * and we won't wake mac80211's queue. 
+ */ + iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id); + } + spin_lock_bh(&txq->lock); + + if (iwl_queue_space(&txq->q) > txq->q.low_mark) + iwl_wake_queue(trans, txq); + } if (q->read_ptr == q->write_ptr) { IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id); @@ -2161,6 +2202,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, csum = skb_checksum(skb, offs, skb->len - offs, 0); *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum); + + skb->ip_summed = CHECKSUM_UNNECESSARY; } if (skb_is_nonlinear(skb) && @@ -2177,6 +2220,22 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); + if (iwl_queue_space(q) < q->high_mark) { + iwl_stop_queue(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_queue_space(q) < 3)) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + + info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] = + dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + + spin_unlock(&txq->lock); + return 0; + } + } + /* In AGG mode, the index in the ring must correspond to the WiFi * sequence number. This is a HW requirements to help the SCD to parse * the BA. @@ -2281,12 +2340,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, * At this point the frame is "transmitted" successfully * and we will get a TX status notification eventually. */ - if (iwl_queue_space(q) < q->high_mark) { - if (wait_write_ptr) - iwl_pcie_txq_inc_wr_ptr(trans, txq); - else - iwl_stop_queue(trans, txq); - } spin_unlock(&txq->lock); return 0; out_err: -- cgit v1.2.3 From 7848505416be4e96af14c3387775622d26a13146 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 14 Dec 2015 17:44:11 +0200 Subject: iwlwifi: pcie: add infrastructure for multi-queue rx The 9000 series devices will support multi rx queues. Current code has one static rx queue - change it to allocate a number of queues per the device capability (pre-9000 devices have the number of rx queues set to one). Subsequent generalizations are: Change the code to access an explicit numbered rx queue only when the queue number is known - when handling interrupt, when accessing the default queue and when iterating the queues. The rest of the functions will receive the rx queue as a pointer. Generalize the warning in allocation failure to consider the allocator status instead of a single rx queue status. Move the rx initial pool of memory buffers to be shared among all the queues and allocated to the default queue on init. 
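The single-queue to multi-queue restructuring described above follows a common shape: allocate an array of rx queues sized by the device capability, initialize each element in a loop, and have every helper take an explicit queue pointer instead of reaching for the one static queue. The following is a minimal standalone sketch of that shape only; the struct and function names (device_ctx, rxq, rx_alloc, rxq_restock) are invented and do not match the iwlwifi code.

#include <stdio.h>
#include <stdlib.h>

struct rxq {
	int id;
	unsigned int read, write;
};

struct device_ctx {
	unsigned int num_rx_queues;  /* 1 on older devices, more on newer ones */
	struct rxq *rxq;             /* was: a single embedded queue */
};

/* Helpers now operate on an explicit queue rather than a default one. */
static void rxq_restock(struct rxq *q)
{
	q->write++;
}

static int rx_alloc(struct device_ctx *dev, unsigned int nqueues)
{
	dev->num_rx_queues = nqueues;
	dev->rxq = calloc(nqueues, sizeof(*dev->rxq));
	if (!dev->rxq)
		return -1;

	for (unsigned int i = 0; i < nqueues; i++)
		dev->rxq[i].id = i;
	return 0;
}

int main(void)
{
	struct device_ctx dev = { 0 };

	if (rx_alloc(&dev, 4))
		return 1;

	/* The default queue (index 0) would own the initial buffer pool. */
	rxq_restock(&dev.rxq[0]);

	for (unsigned int i = 0; i < dev.num_rx_queues; i++)
		printf("rxq %d: write=%u\n", dev.rxq[i].id, dev.rxq[i].write);

	free(dev.rxq);
	return 0;
}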
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 8 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 264 +++++++++++---------- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 73 ++++-- 3 files changed, 191 insertions(+), 154 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 2d8b415fa55e..11ad87fca67a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -57,7 +57,7 @@ #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 #define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 +#define RX_PENDING_WATERMARK 16 struct iwl_host_cmd; @@ -103,7 +103,6 @@ struct isr_statistics { * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers @@ -122,7 +121,6 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; }; @@ -298,6 +296,7 @@ struct iwl_tso_hdr_page { /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data + * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area @@ -324,7 +323,8 @@ struct iwl_tso_hdr_page { * @fw_mon_size: size of the buffer for the firmware monitor */ struct iwl_trans_pcie { - struct iwl_rxq rxq; + struct iwl_rxq *rxq; + struct iwl_rx_mem_buffer rx_pool[RX_QUEUE_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index ccafbd8cf4b3..f557f3dc4db8 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -173,10 +173,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans) /* * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue */ -static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) +static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, + struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; u32 reg; lockdep_assert_held(&rxq->lock); @@ -207,18 +206,18 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans) static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - - spin_lock(&rxq->lock); - - if (!rxq->need_update) - goto exit_unlock; + int i; - iwl_pcie_rxq_inc_wr_ptr(trans); - rxq->need_update = false; + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - exit_unlock: - spin_unlock(&rxq->lock); + if (!rxq->need_update) + continue; + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + rxq->need_update = false; + spin_unlock(&rxq->lock); + } } /* @@ -232,10 +231,8 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) * also updates the memory address in the firmware to reference the new * target buffer. 
*/ -static void iwl_pcie_rxq_restock(struct iwl_trans *trans) +static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; /* @@ -272,7 +269,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) * Increment device's write pointer in multiples of 8. */ if (rxq->write_actual != (rxq->write & ~0x7)) { spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); spin_unlock(&rxq->lock); } } @@ -285,13 +282,9 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct page *page; gfp_t gfp_mask = priority; - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - if (trans_pcie->rx_page_order > 0) gfp_mask |= __GFP_COMP; @@ -301,16 +294,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, if (net_ratelimit()) IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. + /* + * Issue an error if we don't have enough pre-allocated + * buffers. ` */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) + if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit()) IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", - rxq->free_count); + "Failed to alloc_pages\n"); return NULL; } return page; @@ -325,10 +315,10 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; @@ -386,40 +376,23 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) } } -static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) +static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; int i; - lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_QUEUE_SIZE; i++) { - if (!rxq->pool[i].page) + if (!trans_pcie->rx_pool[i].page) continue; - dma_unmap_page(trans->dev, rxq->pool[i].page_dma, + dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, PAGE_SIZE << trans_pcie->rx_page_order, DMA_FROM_DEVICE); - __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order); - rxq->pool[i].page = NULL; + __free_pages(trans_pcie->rx_pool[i].page, + trans_pcie->rx_page_order); + trans_pcie->rx_pool[i].page = NULL; } } -/* - * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free - * - * When moving to rx_free an page is allocated for the slot. - * - * Also restock the Rx queue via iwl_pcie_rxq_restock. 
- * This is called only during initialization - */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) -{ - iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL); - - iwl_pcie_rxq_restock(trans); -} - /* * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues * @@ -444,6 +417,11 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) while (pending) { int i; struct list_head local_allocated; + gfp_t gfp_mask = GFP_KERNEL; + + /* Do not post a warning if there are only a few requests */ + if (pending < RX_PENDING_WATERMARK) + gfp_mask |= __GFP_NOWARN; INIT_LIST_HEAD(&local_allocated); @@ -463,7 +441,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) BUG_ON(rxb->page); /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL); + page = iwl_pcie_rx_alloc_page(trans, gfp_mask); if (!page) continue; rxb->page = page; @@ -561,38 +539,60 @@ static void iwl_pcie_rx_allocator_work(struct work_struct *data) static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; + int i; + + if (WARN_ON(trans_pcie->rxq)) + return -EINVAL; - memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); + trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), + GFP_KERNEL); + if (!trans_pcie->rxq) + return -EINVAL; - spin_lock_init(&rxq->lock); spin_lock_init(&rba->lock); - if (WARN_ON(rxq->bd || rxq->rb_stts)) - return -EINVAL; + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */ - rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + spin_lock_init(&rxq->lock); + /* + * Allocate the circular buffer of Read Buffer Descriptors + * (RBDs) + */ + rxq->bd = dma_zalloc_coherent(dev, + sizeof(__le32) * RX_QUEUE_SIZE, &rxq->bd_dma, GFP_KERNEL); - if (!rxq->bd) - goto err_bd; + if (!rxq->bd) + goto err; - /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, GFP_KERNEL); - if (!rxq->rb_stts) - goto err_rb_stts; + /*Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), + &rxq->rb_stts_dma, + GFP_KERNEL); + if (!rxq->rb_stts) + goto err; + } return 0; -err_rb_stts: - dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; -err_bd: +err: + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq->bd) + dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + } + kfree(trans_pcie->rxq); return -ENOMEM; } @@ -661,17 +661,12 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { - int i; - lockdep_assert_held(&rxq->lock); INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; rxq->used_count = 0; - - for (i = 0; i < RX_QUEUE_SIZE; i++) - list_add(&rxq->pool[i].list, &rxq->rx_used); } static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) @@ -709,15 +704,16 @@ static void 
iwl_pcie_rx_free_rba(struct iwl_trans *trans) int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rxq *def_rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; - if (!rxq->bd) { + if (!trans_pcie->rxq) { err = iwl_pcie_rx_alloc(trans); if (err) return err; } + def_rxq = trans_pcie->rxq; if (!rba->alloc_wq) rba->alloc_wq = alloc_workqueue("rb_allocator", WQ_HIGHPRI | WQ_UNBOUND, 1); @@ -731,29 +727,42 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) iwl_pcie_rx_init_rba(rba); spin_unlock(&rba->lock); - spin_lock(&rxq->lock); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rxq_free_rbs(trans); - iwl_pcie_rx_init_rxb_lists(rxq); + iwl_pcie_free_rbs_pool(trans); for (i = 0; i < RX_QUEUE_SIZE; i++) - rxq->queue[i] = NULL; + def_rxq->queue[i] = NULL; - /* Set us so that we have processed and used all buffers, but have - * not restocked the Rx queue with fresh buffers */ - rxq->read = rxq->write = 0; - rxq->write_actual = 0; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - spin_unlock(&rxq->lock); + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - iwl_pcie_rx_replenish(trans); + spin_lock(&rxq->lock); + /* + * Set read write pointer to reflect that we have processed + * and used all buffers, but have not restocked the Rx queue + * with fresh buffers + */ + rxq->read = 0; + rxq->write = 0; + rxq->write_actual = 0; + memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); - iwl_pcie_rx_hw_init(trans, rxq); + iwl_pcie_rx_init_rxb_lists(rxq); - spin_lock(&rxq->lock); - iwl_pcie_rxq_inc_wr_ptr(trans); - spin_unlock(&rxq->lock); + spin_unlock(&rxq->lock); + } + + /* move the entire pool to the default queue ownership */ + for (i = 0; i < RX_QUEUE_SIZE; i++) + list_add(&trans_pcie->rx_pool[i].list, &def_rxq->rx_used); + + iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); + iwl_pcie_rxq_restock(trans, def_rxq); + iwl_pcie_rx_hw_init(trans, def_rxq); + + spin_lock(&def_rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); + spin_unlock(&def_rxq->lock); return 0; } @@ -761,12 +770,14 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i; - /*if rxq->bd is NULL, it means that nothing has been allocated, - * exit now */ - if (!rxq->bd) { + /* + * if rxq is NULL, it means that nothing has been allocated, + * exit now + */ + if (!trans_pcie->rxq) { IWL_DEBUG_INFO(trans, "Free NULL rx context\n"); return; } @@ -781,23 +792,28 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) iwl_pcie_rx_free_rba(trans); spin_unlock(&rba->lock); - spin_lock(&rxq->lock); - iwl_pcie_rxq_free_rbs(trans); - spin_unlock(&rxq->lock); - - dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; + iwl_pcie_free_rbs_pool(trans); + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + if (rxq->bd) + dma_free_coherent(trans->dev, + sizeof(__le32) * RX_QUEUE_SIZE, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + else + IWL_DEBUG_INFO(trans, + "Free rxq->rb_stts which is NULL\n"); + } - if 
(rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - else - IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); - rxq->rb_stts_dma = 0; - rxq->rb_stts = NULL; + kfree(trans_pcie->rxq); } /* @@ -841,11 +857,11 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, } static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, + struct iwl_rxq *rxq, struct iwl_rx_mem_buffer *rxb, bool emergency) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; bool page_stolen = false; int max_len = PAGE_SIZE << trans_pcie->rx_page_order; @@ -975,7 +991,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 r, i, j, count = 0; bool emergency = false; @@ -1000,7 +1016,7 @@ restart: rxq->queue[i] = NULL; IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); - iwl_pcie_rx_handle_rb(trans, rxb, emergency); + iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); i = (i + 1) & RX_QUEUE_MASK; @@ -1043,7 +1059,7 @@ restart: if (rxq->used_count < RX_QUEUE_SIZE / 3) emergency = false; spin_unlock(&rxq->lock); - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); spin_lock(&rxq->lock); } } @@ -1055,7 +1071,7 @@ restart: if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { rxq->read = i; spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); + iwl_pcie_rxq_restock(trans, rxq); goto restart; } } @@ -1077,7 +1093,7 @@ restart: * will be restocked by the next call of iwl_pcie_rxq_restock. 
*/ if (unlikely(emergency && count)) - iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC); + iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index d60a467a983c..0302aede4fdf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2001,29 +2001,48 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, { struct iwl_trans *trans = file->private_data; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - char buf[256]; - int pos = 0; - const size_t bufsz = sizeof(buf); - - pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", - rxq->read); - pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", - rxq->write); - pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n", - rxq->write_actual); - pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n", - rxq->need_update); - pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n", - rxq->free_count); - if (rxq->rb_stts) { - pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF); - } else { - pos += scnprintf(buf + pos, bufsz - pos, - "closed_rb_num: Not Allocated\n"); + char *buf; + int pos = 0, i, ret; + size_t bufsz = sizeof(buf); + + bufsz = sizeof(char) * 121 * trans->num_rx_queues; + + if (!trans_pcie->rxq) + return -EAGAIN; + + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n", + i); + pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n", + rxq->read); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n", + rxq->write); + pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n", + rxq->write_actual); + pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n", + rxq->need_update); + pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", + rxq->free_count); + if (rxq->rb_stts) { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: %u\n", + le16_to_cpu(rxq->rb_stts->closed_rb_num) & + 0x0FFF); + } else { + pos += scnprintf(buf + pos, bufsz - pos, + "\tclosed_rb_num: Not Allocated\n"); + } } - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); + ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); + kfree(buf); + + return ret; } static ssize_t iwl_dbgfs_interrupt_read(struct file *file, @@ -2188,7 +2207,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int max_len = PAGE_SIZE << trans_pcie->rx_page_order; - struct iwl_rxq *rxq = &trans_pcie->rxq; + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; u32 i, r, j, rb_len = 0; spin_lock(&rxq->lock); @@ -2438,11 +2458,12 @@ static struct iwl_trans_dump_data len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); if (dump_rbs) { + /* Dump RBs is supported only for pre-9000 devices (1 queue) */ + struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = le16_to_cpu(ACCESS_ONCE( - trans_pcie->rxq.rb_stts->closed_rb_num)) + num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; - num_rbs = (num_rbs - 
trans_pcie->rxq.read) & RX_QUEUE_MASK; + num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + (PAGE_SIZE << trans_pcie->rx_page_order)); -- cgit v1.2.3 From 96a6497bc3ed1c19d877e5f4f95f2cfa27448abe Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 23 Dec 2015 15:10:03 +0200 Subject: iwlwifi: pcie: add 9000 series multi queue rx DMA support The 9000 series introduces several changes in the device DMA operation. As the device now supports multi-queue rx, several DMA channels should be configured. The flows of providing the device with the allocated RBDs now changes as well - the device maintains a separate table of used and free table. The hardware may use the free table to feed RBDs to any queue. This requires maintaing a shared table to map returned RBDs to the original RXB - for that purpose the VID is introduced - an internal identifier of the RB placed in the lower 12 bits and returned by HW in the used data. Another change is the support of 64 bit DMA address. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 3 +- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 + drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 77 ++++++ drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 28 ++- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 277 ++++++++++++++++----- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 15 +- 6 files changed, 322 insertions(+), 80 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index ecbf4822cd69..4b93404f46a7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -138,7 +138,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .smem_offset = IWL9000_SMEM_OFFSET, \ .smem_len = IWL9000_SMEM_LEN, \ .thermal_params = &iwl9000_tt_params, \ - .apmg_not_supported = true + .apmg_not_supported = true, \ + .mq_rx_supported = true const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index f99048135fb9..dad5570d6cc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -311,6 +311,7 @@ struct iwl_pwr_tx_backoff { * @dccm2_len: length of the second DCCM * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM + * @mq_rx_supported: multi-queue rx support * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -362,6 +363,7 @@ struct iwl_cfg { const u32 smem_len; const struct iwl_tt_params *thermal_params; bool apmg_not_supported; + bool mq_rx_supported; }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 5cc6be927eab..4ab6682ea53e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -31,6 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. 
All rights reserved. + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -312,6 +314,77 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28 #define FH_MEM_TB_MAX_LENGTH (0x00020000) +/* 9000 rx series registers */ + +#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */ +#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define RFH_Q0_FRBDCB_WIDX 0xA08080 +#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4) +/* Read index table */ +#define RFH_Q0_FRBDCB_RIDX 0xA080C0 +#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4) +/* Used list table */ +#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */ +#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8) +/* Write index table */ +#define RFH_Q0_URBDCB_WIDX 0xA08180 +#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4) +#define RFH_Q0_URBDCB_VAID 0xA081C0 +#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4) +/* stts */ +#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /*64 bits address */ +#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8) + +#define RFH_Q0_ORB_WPTR_LSB 0xA08280 +#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8) +#define RFH_RBDBUF_RBD0_LSB 0xA08300 +#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) + +/* DMA configuration */ +#define RFH_RXF_DMA_CFG 0xA09820 +/* RB size */ +#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ +#define RFH_RXF_DMA_RB_SIZE_POS 16 +#define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_12K (0x9 << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_16K (0xA << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_20K (0xB << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_24K (0xC << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_28K (0xD << RFH_RXF_DMA_RB_SIZE_POS) +#define RFH_RXF_DMA_RB_SIZE_32K (0xE << RFH_RXF_DMA_RB_SIZE_POS) +/* RB Circular Buffer size:defines the table sizes in RBD units */ +#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */ +#define RFH_RXF_DMA_RBDCB_SIZE_POS 20 +#define RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_64 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) +#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ +#define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 +#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) +#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ +#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ +#define RFH_DMA_EN_ENABLE_VAL BIT(31) + +#define RFH_RXF_RXQ_ACTIVE 0xA0980C + +#define RFH_GEN_CFG 0xA09800 +#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 
0xF00 +#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) +#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) +#define DEFAULT_RXQ_NUM 8 + +/* end of 9000 rx series registers */ + /* TFDB Area - TFDs buffer table */ #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF) #define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900) @@ -434,6 +507,10 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) */ #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) +#define MQ_RX_TABLE_SIZE 512 +#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1) +#define MQ_RX_POOL_SIZE MQ_RX_TABLE_MASK + #define RX_QUEUE_SIZE 256 #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 11ad87fca67a..bdda7028c393 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -56,7 +56,6 @@ #define RX_NUM_QUEUES 1 #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) #define RX_PENDING_WATERMARK 16 struct iwl_host_cmd; @@ -64,9 +63,16 @@ struct iwl_host_cmd; /*This file includes the declaration that are internal to the * trans_pcie layer */ +/** + * struct iwl_rx_mem_buffer + * @page_dma: bus address of rxb page + * @page: driver's pointer to the rxb page + * @vid: index of this rxb in the global table + */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; struct page *page; + u16 vid; struct list_head list; }; @@ -90,8 +96,12 @@ struct isr_statistics { /** * struct iwl_rxq - Rx queue - * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) + * @id: queue index + * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). + * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) + * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free @@ -103,18 +113,22 @@ struct isr_statistics { * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @queue: actual rx queue + * @queue: actual rx queue. Not used for multi-rx queue. 
* * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { - __le32 *bd; + int id; + void *bd; dma_addr_t bd_dma; + __le32 *used_bd; + dma_addr_t used_bd_dma; u32 read; u32 write; u32 free_count; u32 used_count; u32 write_actual; + u32 queue_size; struct list_head rx_free; struct list_head rx_used; bool need_update; @@ -126,7 +140,6 @@ struct iwl_rxq { /** * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator * @req_pending: number of requests the allcator had not processed yet * @req_ready: number of requests honored and ready for claiming * @rbd_allocated: RBDs with pages allocated and ready to be handled to @@ -138,7 +151,6 @@ struct iwl_rxq { * @rx_alloc: work struct for background calls */ struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; atomic_t req_pending; atomic_t req_ready; struct list_head rbd_allocated; @@ -297,6 +309,7 @@ struct iwl_tso_hdr_page { * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues + * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area @@ -324,7 +337,8 @@ struct iwl_tso_hdr_page { */ struct iwl_trans_pcie { struct iwl_rxq *rxq; - struct iwl_rx_mem_buffer rx_pool[RX_QUEUE_SIZE]; + struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE]; + struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index f557f3dc4db8..a385f3cddb5d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -140,8 +140,8 @@ */ static int iwl_rxq_space(const struct iwl_rxq *rxq) { - /* Make sure RX_QUEUE_SIZE is a power of 2 */ - BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1)); + /* Make sure rx queue size is a power of 2 */ + WARN_ON(rxq->queue_size & (rxq->queue_size - 1)); /* * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity @@ -149,7 +149,7 @@ static int iwl_rxq_space(const struct iwl_rxq *rxq) * The following is equivalent to modulo by RX_QUEUE_SIZE and is well * defined for negative dividends. 
*/ - return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1); + return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1); } /* @@ -160,6 +160,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) return cpu_to_le32((u32)(dma_addr >> 8)); } +static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val) +{ + iwl_write_prph(trans, ofs, val & 0xffffffff); + iwl_write_prph(trans, ofs + 4, val >> 32); +} + /* * iwl_pcie_rx_stop - stops the Rx DMA */ @@ -200,7 +206,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, } rxq->write_actual = round_down(rxq->write, 8); - iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); + if (trans->cfg->mq_rx_supported) + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id), + rxq->write_actual); + else + iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual); } static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) @@ -220,6 +230,51 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) } } +static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_rx_mem_buffer *rxb; + + /* + * If the device isn't enabled - no need to try to add buffers... + * This can happen when we stop the device and still have an interrupt + * pending. We stop the APM before we sync the interrupts because we + * have to (see comment there). On the other hand, since the APM is + * stopped, we cannot access the HW (in particular not prph). + * So don't try to restock if the APM has been already stopped. + */ + if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return; + + spin_lock(&rxq->lock); + while (rxq->free_count) { + __le64 *bd = (__le64 *)rxq->bd; + + /* Get next free Rx buffer, remove from free list */ + rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, + list); + list_del(&rxb->list); + + /* 12 first bits are expected to be empty */ + WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); + /* Point to Rx buffer via next RBD in circular buffer */ + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; + rxq->free_count--; + } + spin_unlock(&rxq->lock); + + /* + * If we've added more space for the firmware to place data, tell it. + * Increment device's write pointer in multiples of 8. + */ + if (rxq->write_actual != (rxq->write & ~0x7)) { + spin_lock(&rxq->lock); + iwl_pcie_rxq_inc_wr_ptr(trans, rxq); + spin_unlock(&rxq->lock); + } +} + /* * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool * @@ -248,6 +303,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) spin_lock(&rxq->lock); while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) { + __le32 *bd = (__le32 *)rxq->bd; /* The overwritten rxb must be a used one */ rxb = rxq->queue[rxq->write]; BUG_ON(rxb && rxb->page); @@ -258,7 +314,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) list_del(&rxb->list); /* Point to Rx buffer via next RBD in circular buffer */ - rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); + bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma); rxq->queue[rxq->write] = rxb; rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; rxq->free_count--; @@ -362,10 +418,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, __free_pages(page, trans_pcie->rx_page_order); return; } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! 
*/ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); spin_lock(&rxq->lock); @@ -381,7 +433,7 @@ static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < RX_QUEUE_SIZE; i++) { + for (i = 0; i < MQ_RX_POOL_SIZE; i++) { if (!trans_pcie->rx_pool[i].page) continue; dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, @@ -455,10 +507,6 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) __free_pages(page, trans_pcie->rx_page_order); continue; } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); /* move the allocated entry to the out list */ list_move(&rxb->list, &local_allocated); @@ -542,6 +590,8 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; int i; + int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : + sizeof(__le32); if (WARN_ON(trans_pcie->rxq)) return -EINVAL; @@ -557,16 +607,30 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans) struct iwl_rxq *rxq = &trans_pcie->rxq[i]; spin_lock_init(&rxq->lock); + if (trans->cfg->mq_rx_supported) + rxq->queue_size = MQ_RX_TABLE_SIZE; + else + rxq->queue_size = RX_QUEUE_SIZE; + /* * Allocate the circular buffer of Read Buffer Descriptors * (RBDs) */ rxq->bd = dma_zalloc_coherent(dev, - sizeof(__le32) * RX_QUEUE_SIZE, - &rxq->bd_dma, GFP_KERNEL); + free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); if (!rxq->bd) goto err; + if (trans->cfg->mq_rx_supported) { + rxq->used_bd = dma_zalloc_coherent(dev, + sizeof(__le32) * + rxq->queue_size, + &rxq->used_bd_dma, + GFP_KERNEL); + if (!rxq->used_bd) + goto err; + } /*Allocate the driver's pointer to receive buffer status */ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), @@ -582,7 +646,7 @@ err: struct iwl_rxq *rxq = &trans_pcie->rxq[i]; if (rxq->bd) - dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE, + dma_free_coherent(dev, free_size * rxq->queue_size, rxq->bd, rxq->bd_dma); rxq->bd_dma = 0; rxq->bd = NULL; @@ -591,8 +655,15 @@ err: dma_free_coherent(trans->dev, sizeof(struct iwl_rb_status), rxq->rb_stts, rxq->rb_stts_dma); + + if (rxq->used_bd) + dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; } kfree(trans_pcie->rxq); + return -ENOMEM; } @@ -659,46 +730,82 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } -static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) { - lockdep_assert_held(&rxq->lock); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 rb_size, enabled = 0; + int i; - INIT_LIST_HEAD(&rxq->rx_free); - INIT_LIST_HEAD(&rxq->rx_used); - rxq->free_count = 0; - rxq->used_count = 0; -} + switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_4K: + rb_size = RFH_RXF_DMA_RB_SIZE_4K; + break; + case IWL_AMSDU_8K: + rb_size = RFH_RXF_DMA_RB_SIZE_8K; + break; + case IWL_AMSDU_12K: + rb_size = RFH_RXF_DMA_RB_SIZE_12K; + break; + default: + WARN_ON(1); + rb_size = RFH_RXF_DMA_RB_SIZE_4K; + } -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; + /* Stop Rx DMA */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); + /* disable free amd used rx 
queue operation */ + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0); - lockdep_assert_held(&rba->lock); + for (i = 0; i < trans->num_rx_queues; i++) { + /* Tell device where to find RBD free table in DRAM */ + iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), + (u64)(rxq->bd_dma)); + /* Tell device where to find RBD used table in DRAM */ + iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), + (u64)(rxq->used_bd_dma)); + /* Tell device where in DRAM to update its Rx status */ + iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), + rxq->rb_stts_dma); + /* Reset device indice tables */ + iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); + iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); + iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0); + + enabled |= BIT(i) | BIT(i + 16); + } - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); + /* restock default queue */ + iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]); - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); + /* + * Enable Rx DMA + * Single frame mode + * Rx buffer size 4 or 8k or 12k + * Min RB size 4 or 8 + * 512 RBDs + */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG, + RFH_DMA_EN_ENABLE_VAL | + rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | + RFH_RXF_DMA_MIN_RB_4_8 | + RFH_RXF_DMA_RBDCB_SIZE_512); + + iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | + RFH_GEN_CFG_SERVICE_DMA_SNOOP); + iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); + + /* Set interrupt coalescing timer to default (2048 usecs) */ + iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF); } -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) +static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - lockdep_assert_held(&rba->lock); + lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } + INIT_LIST_HEAD(&rxq->rx_free); + INIT_LIST_HEAD(&rxq->rx_used); + rxq->free_count = 0; + rxq->used_count = 0; } int iwl_pcie_rx_init(struct iwl_trans *trans) @@ -706,7 +813,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *def_rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i, err; + int i, err, num_rbds, allocator_pool_size; if (!trans_pcie->rxq) { err = iwl_pcie_rx_alloc(trans); @@ -722,9 +829,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) spin_lock(&rba->lock); atomic_set(&rba->req_pending, 0); atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); + INIT_LIST_HEAD(&rba->rbd_allocated); + INIT_LIST_HEAD(&rba->rbd_empty); spin_unlock(&rba->lock); /* free all first - we might be reconfigured for a different size */ @@ -736,6 +842,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + rxq->id = i; + spin_lock(&rxq->lock); /* * Set read write pointer to reflect that we have processed @@ -752,13 +860,29 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) spin_unlock(&rxq->lock); } - /* move the entire pool to the default 
queue ownership */ - for (i = 0; i < RX_QUEUE_SIZE; i++) - list_add(&trans_pcie->rx_pool[i].list, &def_rxq->rx_used); + /* move the pool to the default queue and allocator ownerships */ + num_rbds = trans->cfg->mq_rx_supported ? + MQ_RX_POOL_SIZE : RX_QUEUE_SIZE; + allocator_pool_size = trans->num_rx_queues * + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); + for (i = 0; i < num_rbds; i++) { + struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; + + if (i < allocator_pool_size) + list_add(&rxb->list, &rba->rbd_empty); + else + list_add(&rxb->list, &def_rxq->rx_used); + trans_pcie->global_table[i] = rxb; + rxb->vid = (u16)i; + } iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); - iwl_pcie_rxq_restock(trans, def_rxq); - iwl_pcie_rx_hw_init(trans, def_rxq); + if (trans->cfg->mq_rx_supported) { + iwl_pcie_rx_mq_hw_init(trans, def_rxq); + } else { + iwl_pcie_rxq_restock(trans, def_rxq); + iwl_pcie_rx_hw_init(trans, def_rxq); + } spin_lock(&def_rxq->lock); iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq); @@ -771,6 +895,8 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; + int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : + sizeof(__le32); int i; /* @@ -788,10 +914,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rba->alloc_wq = NULL; } - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); - iwl_pcie_free_rbs_pool(trans); for (i = 0; i < trans->num_rx_queues; i++) { @@ -799,7 +921,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) if (rxq->bd) dma_free_coherent(trans->dev, - sizeof(__le32) * RX_QUEUE_SIZE, + free_size * rxq->queue_size, rxq->bd, rxq->bd_dma); rxq->bd_dma = 0; rxq->bd = NULL; @@ -811,8 +933,14 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) else IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n"); - } + if (rxq->used_bd) + dma_free_coherent(trans->dev, + sizeof(__le32) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + } kfree(trans_pcie->rxq); } @@ -1009,16 +1137,26 @@ restart: while (i != r) { struct iwl_rx_mem_buffer *rxb; - if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2)) + if (unlikely(rxq->used_count == rxq->queue_size / 2)) emergency = true; - rxb = rxq->queue[i]; - rxq->queue[i] = NULL; + if (trans->cfg->mq_rx_supported) { + /* + * used_bd is a 32 bit but only 12 are used to retrieve + * the vid + */ + u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]); + + rxb = trans_pcie->global_table[vid]; + } else { + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + } IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); - i = (i + 1) & RX_QUEUE_MASK; + i = (i + 1) & (rxq->queue_size - 1); /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - * try to claim the pre-allocated buffers from the allocator */ @@ -1056,7 +1194,7 @@ restart: count++; if (count == 8) { count = 0; - if (rxq->used_count < RX_QUEUE_SIZE / 3) + if (rxq->used_count < rxq->queue_size / 3) emergency = false; spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); @@ -1071,7 +1209,10 @@ restart: if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { rxq->read = i; spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans, rxq); + if (trans->cfg->mq_rx_supported) + iwl_pcie_rxq_mq_restock(trans, rxq); + else + iwl_pcie_rxq_restock(trans, rxq); goto restart; } } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 0302aede4fdf..35810965221c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2403,7 +2403,8 @@ static struct iwl_trans_dump_data u32 len, num_rbs; u32 monitor_len; int i, ptr; - bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status); + bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && + !trans->cfg->mq_rx_supported; /* transport dump header */ len = sizeof(*dump_data); @@ -2562,7 +2563,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; - int ret; + int ret, addr_size; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); @@ -2600,11 +2601,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, PCIE_LINK_STATE_CLKPM); } + if (cfg->mq_rx_supported) + addr_size = 64; + else + addr_size = 36; + pci_set_master(pdev); - ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size)); if (!ret) - ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + ret = pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(addr_size)); if (ret) { ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (!ret) -- cgit v1.2.3 From b3ff1270566d41eb8ab2d67844bf17b7fa9fee78 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Wed, 6 Jan 2016 18:40:38 -0200 Subject: iwlwifi: pcie: add initial RTPM support for PCI Add an initial implementation of runtime power management (RTPM) for PCI devices. With this patch, RTPM is only used when wifi is off (i.e. the wifi interface is down). This implementation is behind a new Kconfig flag, IWLWIFI_PCIE_RTPM. Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/Kconfig | 12 +++++ drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 60 +++++++++++++++++++++++-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 27 +++++++++++ 3 files changed, 96 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 866067789330..11932d53ea24 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -99,6 +99,18 @@ config IWLWIFI_UAPSD If unsure, say N. +config IWLWIFI_PCIE_RTPM + bool "Enable runtime power management mode for PCIe devices" + depends on IWLMVM && PM + default false + help + Say Y here to enable runtime power management for PCIe + devices. If enabled, the device will go into low power mode + when idle for a short period of time, allowing for improved + power saving during runtime. + + If unsure, say N. + menu "Debugging Options" config IWLWIFI_DEBUG diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 6261a68cae90..676d2391eb66 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -66,6 +67,9 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include +#ifdef CONFIG_IWLWIFI_PCIE_RTPM +#include +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ #include #include #include @@ -623,6 +627,13 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_drv; +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, + iwlwifi_mod_params.d0i3_entry_delay); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_allow(&pdev->dev); +#endif return 0; out_free_drv: @@ -689,15 +700,58 @@ static int iwl_pci_resume(struct device *device) return 0; } -static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume); +#ifdef CONFIG_IWLWIFI_PCIE_RTPM +static int iwl_pci_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + + IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); + + /* For now we only allow D0I3 if the device is off */ + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return -EBUSY; + + trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; + + iwl_trans_d3_suspend(trans, false); + + return 0; +} + +static int iwl_pci_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + enum iwl_d3_status d3_status; + + IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n"); + + iwl_trans_d3_resume(trans, &d3_status, false); + + trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + + return 0; +} +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + +static const struct dev_pm_ops iwl_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend, + iwl_pci_resume) +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, + iwl_pci_runtime_resume, + NULL) +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ +}; #define IWL_PM_OPS (&iwl_dev_pm_ops) -#else +#else /* CONFIG_PM_SLEEP */ #define IWL_PM_OPS NULL -#endif +#endif /* CONFIG_PM_SLEEP */ static struct pci_driver iwl_pci_driver = { .name = DRV_NAME, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 35810965221c..db94fe1e1bc6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -72,6 +72,9 @@ #include #include #include +#ifdef CONFIG_IWLWIFI_PCIE_RTPM +#include +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ #include "iwl-drv.h" #include "iwl-trans.h" @@ -1194,6 +1197,9 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) if (hw_rfkill != was_hw_rfkill) iwl_trans_pcie_rf_kill(trans, hw_rfkill); +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + pm_runtime_put_sync(trans->dev); +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ /* re-take ownership to prevent other users from stealing the deivce */ iwl_pcie_prepare_card_hw(trans); } @@ -1353,6 +1359,9 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* ... 
rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + pm_runtime_get_sync(trans->dev); +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ return 0; } @@ -1476,6 +1485,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + /* TODO: check if this is really needed */ + pm_runtime_disable(trans->dev); +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ synchronize_irq(trans_pcie->pci_dev->irq); iwl_pcie_tx_free(trans); @@ -1831,6 +1844,9 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans) spin_lock_irqsave(&trans_pcie->ref_lock, flags); IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); trans_pcie->ref_count++; +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + pm_runtime_get(&trans_pcie->pci_dev->dev); +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -1849,6 +1865,11 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans) return; } trans_pcie->ref_count--; +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); + pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -2728,6 +2749,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->inta_mask = CSR_INI_SET_MASK; +#ifdef CONFIG_IWLWIFI_PCIE_RTPM + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; +#else + trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; +#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + return trans; out_free_ict: -- cgit v1.2.3 From 4cbb8e50338a2f2453ba399ce52562e0a111fc1f Mon Sep 17 00:00:00 2001 From: Luciano Coelho Date: Tue, 18 Aug 2015 16:02:38 +0300 Subject: iwlwifi: pcie: add RTPM support when wifi is enabled Enable runtime power management (RTPM) for PCIe devices and implement the corresponding functions to enable D0i3 mode when the device is idle. Additionally, remove some unnecessary #ifdef's because the RTPM code will not be called if runtime PM is not configured. Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 5 ++ drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 98 +++++++++++++++++++--- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 5 ++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 43 ++++++---- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 17 +++- 5 files changed, 141 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 82fb3a97a46d..fe170a3fb1a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -736,6 +736,11 @@ enum iwl_plat_pm_mode { IWL_PLAT_PM_MODE_D0I3, }; +/* Max time to wait for trans to become idle/non-idle on d0i3 + * enter/exit (in msecs). 
+ */ +#define IWL_TRANS_IDLE_TIMEOUT 2000 + /** * struct iwl_trans - transport common data * diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 676d2391eb66..16b579a5aa6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -67,9 +67,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#ifdef CONFIG_IWLWIFI_PCIE_RTPM #include -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ #include #include #include @@ -627,13 +625,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_free_drv; -#ifdef CONFIG_IWLWIFI_PCIE_RTPM - pm_runtime_set_active(&pdev->dev); - pm_runtime_set_autosuspend_delay(&pdev->dev, + /* if RTPM is in use, enable it in our device */ + if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { + pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, iwlwifi_mod_params.d0i3_entry_delay); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_allow(&pdev->dev); -#endif + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_allow(&pdev->dev); + } + return 0; out_free_drv: @@ -700,17 +700,90 @@ static int iwl_pci_resume(struct device *device) return 0; } +int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + if (test_bit(STATUS_FW_ERROR, &trans->status)) + return 0; + + set_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + + /* config the fw */ + ret = iwl_op_mode_enter_d0i3(trans->op_mode); + if (ret == 1) { + IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n"); + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + return -EBUSY; + } + if (ret) + goto err; + + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + test_bit(STATUS_TRANS_IDLE, &trans->status), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout entering D0i3\n"); + ret = -ETIMEDOUT; + goto err; + } + + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + + return 0; +err: + clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status); + iwl_trans_fw_error(trans); + return ret; +} + +int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int ret; + + /* sometimes a D0i3 entry is not followed through */ + if (!test_bit(STATUS_TRANS_IDLE, &trans->status)) + return 0; + + /* config the fw */ + ret = iwl_op_mode_exit_d0i3(trans->op_mode); + if (ret) + goto err; + + /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */ + + ret = wait_event_timeout(trans_pcie->d0i3_waitq, + !test_bit(STATUS_TRANS_IDLE, &trans->status), + msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT)); + if (!ret) { + IWL_ERR(trans, "Timeout exiting D0i3\n"); + ret = -ETIMEDOUT; + goto err; + } + + return 0; +err: + clear_bit(STATUS_TRANS_IDLE, &trans->status); + iwl_trans_fw_error(trans); + return ret; +} + #ifdef CONFIG_IWLWIFI_PCIE_RTPM static int iwl_pci_runtime_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); struct iwl_trans *trans = pci_get_drvdata(pdev); + int ret; IWL_DEBUG_RPM(trans, "entering runtime suspend\n"); - /* For now we only allow D0I3 if the device is off */ - if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) - return -EBUSY; + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { + ret = iwl_pci_fw_enter_d0i3(trans); + if (ret < 0) + return ret; + } trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; @@ -729,7 +802,8 @@ static int 
iwl_pci_runtime_resume(struct device *device) iwl_trans_d3_resume(trans, &d3_status, false); - trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return iwl_pci_fw_exit_d0i3(trans); return 0; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index bdda7028c393..7bc02e0cdd93 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -374,6 +375,7 @@ struct iwl_trans_pcie { bool ucode_write_complete; wait_queue_head_t ucode_write_waitq; wait_queue_head_t wait_command_queue; + wait_queue_head_t d0i3_waitq; u8 cmd_queue; u8 cmd_fifo; @@ -594,4 +596,7 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) } #endif +int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans); +int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans); + #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index db94fe1e1bc6..cfdc7f6e554a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -72,9 +72,7 @@ #include #include #include -#ifdef CONFIG_IWLWIFI_PCIE_RTPM #include -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ #include "iwl-drv.h" #include "iwl-trans.h" @@ -1197,9 +1195,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) if (hw_rfkill != was_hw_rfkill) iwl_trans_pcie_rf_kill(trans, hw_rfkill); -#ifdef CONFIG_IWLWIFI_PCIE_RTPM - pm_runtime_put_sync(trans->dev); -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ /* re-take ownership to prevent other users from stealing the deivce */ iwl_pcie_prepare_card_hw(trans); } @@ -1359,9 +1354,10 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) /* ... 
rfkill can call stop_device and set it false if needed */ iwl_trans_pcie_rf_kill(trans, hw_rfkill); -#ifdef CONFIG_IWLWIFI_PCIE_RTPM - pm_runtime_get_sync(trans->dev); -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + /* Make sure we sync here, because we'll need full access later */ + if (low_power) + pm_runtime_resume(trans->dev); + return 0; } @@ -1485,10 +1481,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; -#ifdef CONFIG_IWLWIFI_PCIE_RTPM /* TODO: check if this is really needed */ pm_runtime_disable(trans->dev); -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ + synchronize_irq(trans_pcie->pci_dev->irq); iwl_pcie_tx_free(trans); @@ -1844,9 +1839,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans) spin_lock_irqsave(&trans_pcie->ref_lock, flags); IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count); trans_pcie->ref_count++; -#ifdef CONFIG_IWLWIFI_PCIE_RTPM pm_runtime_get(&trans_pcie->pci_dev->dev); -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -1865,10 +1858,9 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans) return; } trans_pcie->ref_count--; -#ifdef CONFIG_IWLWIFI_PCIE_RTPM + pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev); pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev); -#endif /* CONFIG_IWLWIFI_PCIE_RTPM */ spin_unlock_irqrestore(&trans_pcie->ref_lock, flags); } @@ -2536,6 +2528,22 @@ static struct iwl_trans_dump_data return dump_data; } +#ifdef CONFIG_PM_SLEEP +static int iwl_trans_pcie_suspend(struct iwl_trans *trans) +{ + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) + return iwl_pci_fw_enter_d0i3(trans); + + return 0; +} + +static void iwl_trans_pcie_resume(struct iwl_trans *trans) +{ + if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) + iwl_pci_fw_exit_d0i3(trans); +} +#endif /* CONFIG_PM_SLEEP */ + static const struct iwl_trans_ops trans_ops_pcie = { .start_hw = iwl_trans_pcie_start_hw, .op_mode_leave = iwl_trans_pcie_op_mode_leave, @@ -2546,6 +2554,11 @@ static const struct iwl_trans_ops trans_ops_pcie = { .d3_suspend = iwl_trans_pcie_d3_suspend, .d3_resume = iwl_trans_pcie_d3_resume, +#ifdef CONFIG_PM_SLEEP + .suspend = iwl_trans_pcie_suspend, + .resume = iwl_trans_pcie_resume, +#endif /* CONFIG_PM_SLEEP */ + .send_cmd = iwl_trans_pcie_send_hcmd, .tx = iwl_trans_pcie_tx, @@ -2735,6 +2748,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); + init_waitqueue_head(&trans_pcie->d0i3_waitq); + ret = iwl_pcie_alloc_ict(trans); if (ret) goto out_pci_disable_msi; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index b0b0fd9e2eff..c499345ba526 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,7 +1,8 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -1727,6 +1728,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, wake_up(&trans_pcie->wait_command_queue); } + if (meta->flags & CMD_MAKE_TRANS_IDLE) { + IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n", + iwl_get_cmd_string(trans, cmd->hdr.cmd)); + set_bit(STATUS_TRANS_IDLE, &trans->status); + wake_up(&trans_pcie->d0i3_waitq); + } + + if (meta->flags & CMD_WAKE_UP_TRANS) { + IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n", + iwl_get_cmd_string(trans, cmd->hdr.cmd)); + clear_bit(STATUS_TRANS_IDLE, &trans->status); + wake_up(&trans_pcie->d0i3_waitq); + } + meta->flags = 0; spin_unlock_bh(&txq->lock); -- cgit v1.2.3 From 23ae61282b88873bec2d56c78fea531f8485146c Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Thu, 31 Dec 2015 18:18:02 +0200 Subject: iwlwifi: mvm: Do not switch to D3 image on suspend Currently when the driver is configured with wowlan parameters, and enters D3 mode, the driver switches the FW image to D3, and when it exists suspend, it reloads the D0 image. If the firmware supports the consolidation of the D0 & D3 images there is no need to load the D3 image on suspend, and no need to reload the D0 image on resume. Do not switch images on suspend / resume, for firmwares that support consolidated images. Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 4 +- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 2 + drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 13 +++-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 70 ++++++++++++++++------- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 4 +- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 4 +- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 11 ++-- 7 files changed, 70 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 29ea1c6705b4..4db4cb7aa73a 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -396,7 +396,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw, iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); - iwl_trans_d3_suspend(priv->trans, false); + iwl_trans_d3_suspend(priv->trans, false, true); goto out; @@ -469,7 +469,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) /* we'll clear ctx->vif during iwlagn_prepare_restart() */ vif = ctx->vif; - ret = iwl_trans_d3_resume(priv->trans, &d3_status, false); + ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true); if (ret) goto out_unlock; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 84f8aeb926c8..2273908ad83a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -297,6 +297,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), * which also implies support for the scheduler configuration command * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching + * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload @@ -330,6 +331,7 @@ enum iwl_ucode_tlv_capa { 
IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = (__force iwl_ucode_tlv_capa_t)11, IWL_UCODE_TLV_CAPA_DQA_SUPPORT = (__force iwl_ucode_tlv_capa_t)12, IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = (__force iwl_ucode_tlv_capa_t)13, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = (__force iwl_ucode_tlv_capa_t)17, IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = (__force iwl_ucode_tlv_capa_t)18, IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index fe170a3fb1a6..e6a5e99f95e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -618,9 +618,9 @@ struct iwl_trans_ops { void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); void (*stop_device)(struct iwl_trans *trans, bool low_power); - void (*d3_suspend)(struct iwl_trans *trans, bool test); + void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test); + bool test, bool reset); int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); @@ -925,22 +925,23 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans) _iwl_trans_stop_device(trans, true); } -static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test) +static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) { might_sleep(); if (trans->ops->d3_suspend) - trans->ops->d3_suspend(trans, test); + trans->ops->d3_suspend(trans, test, reset); } static inline int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test) + bool test, bool reset) { might_sleep(); if (!trans->ops->d3_resume) return 0; - return trans->ops->d3_resume(trans, status, test); + return trans->ops->d3_resume(trans, status, test, reset); } static inline void iwl_trans_ref(struct iwl_trans *trans) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index d3e21d95cece..78572ef89b26 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1023,14 +1025,18 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm, struct ieee80211_sta *ap_sta) { int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; - ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); - if (ret) - return ret; + ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta); + if (ret) + return ret; + } if (!iwlwifi_mod_params.sw_crypto) { /* @@ -1072,10 +1078,14 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, { struct iwl_wowlan_config_cmd wowlan_config_cmd = {}; int ret; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); - ret = iwl_mvm_switch_to_d3(mvm); - if (ret) - return ret; + if (!unified_image) { + ret = iwl_mvm_switch_to_d3(mvm); + if (ret) + return ret; + } /* rfkill release can be either for wowlan or netdetect */ if (wowlan->rfkill_release) @@ -1151,6 +1161,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, }; int ret; int len __maybe_unused; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); if (!wowlan) { /* @@ -1236,7 +1248,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); - iwl_trans_d3_suspend(mvm->trans, test); + iwl_trans_d3_suspend(mvm->trans, test, !unified_image); out: if (ret < 0) { iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); @@ -1299,7 +1311,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags); mutex_unlock(&mvm->d0i3_suspend_mutex); - iwl_trans_d3_suspend(trans, false); + iwl_trans_d3_suspend(trans, false, false); return 0; } @@ -2041,9 +2053,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac, static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) { struct ieee80211_vif *vif = NULL; - int ret; + int ret = 1; enum iwl_d3_status d3_status; bool keep = false; + bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); + + u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE | + CMD_WAKE_UP_TRANS; mutex_lock(&mvm->mutex); @@ -2052,7 +2069,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) if (IS_ERR_OR_NULL(vif)) goto err; - ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test); + ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image); if (ret) goto err; @@ -2095,17 +2112,28 @@ out_iterate: iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); out: - /* return 1 to reconfigure the device */ + if (unified_image && !ret) { + ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL); + if (!ret) /* D3 ended successfully - no need to reset device */ + return 0; + } + + /* + * Reconfigure the device in one of the following cases: + * 1. We are not using a unified image + * 2. We are using a unified image but had an error while exiting D3 + */ set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); - - /* We always return 1, which causes mac80211 to do a reconfig - * with IEEE80211_RECONFIG_TYPE_RESTART. This type of - * reconfig calls iwl_mvm_restart_complete(), where we unref - * the IWL_MVM_REF_UCODE_DOWN, so we need to take the - * reference here. 
+ /* + * When switching images we return 1, which causes mac80211 + * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART. + * This type of reconfig calls iwl_mvm_restart_complete(), + * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need + * to take the reference here. */ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); + return 1; } @@ -2122,7 +2150,7 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm) enum iwl_d3_status d3_status; struct iwl_trans *trans = mvm->trans; - iwl_trans_d3_resume(trans, &d3_status, false); + iwl_trans_d3_resume(trans, &d3_status, false, false); /* * make sure to clear D0I3_DEFER_WAKEUP before diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 9a15642f80dd..0476e7688e9b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1109,7 +1109,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params, vif)); - if (type == IWL_MVM_SCAN_SCHED) + if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT) cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); if (iwl_mvm_scan_use_ebs(mvm, vif)) @@ -1351,7 +1351,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0); - ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, IWL_MVM_SCAN_SCHED); + ret = iwl_mvm_scan_umac(mvm, vif, ¶ms, type); } else { hcmd.id = SCAN_OFFLOAD_REQUEST_CMD; ret = iwl_mvm_scan_lmac(mvm, vif, ¶ms); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 16b579a5aa6b..762e7c4d96b6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -787,7 +787,7 @@ static int iwl_pci_runtime_suspend(struct device *device) trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3; - iwl_trans_d3_suspend(trans, false); + iwl_trans_d3_suspend(trans, false, false); return 0; } @@ -800,7 +800,7 @@ static int iwl_pci_runtime_resume(struct device *device) IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n"); - iwl_trans_d3_resume(trans, &d3_status, false); + iwl_trans_d3_resume(trans, &d3_status, false, false); if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) return iwl_pci_fw_exit_d0i3(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index cfdc7f6e554a..abe091698471 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1219,11 +1219,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) _iwl_trans_pcie_stop_device(trans, true); } -static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) +static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, + bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { + if (!reset) { /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PERSIST_MODE); @@ -1247,7 +1248,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) { + if (reset) { /* * reset TX queues -- some of their 
registers reset during S3 * so if we don't reset everything here the D3 image would try @@ -1261,7 +1262,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test) static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test) + bool test, bool reset) { u32 val; int ret; @@ -1296,7 +1297,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_set_pwr(trans, false); - if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) { + if (!reset) { iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); } else { -- cgit v1.2.3 From 15ffd075e4088d3fc83ce8db914e7d3b6f31bc1c Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 24 Jan 2016 23:04:22 +0200 Subject: iwlwifi: various comments and code cleanups No need to include net/ip6_checksum.h twice. Remove TODOs. Remove trailing space. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 3 --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 - drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 1 - 4 files changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index e6a5e99f95e0..0ca0f13b69b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -506,7 +506,7 @@ struct iwl_trans_config { bool sw_csum_tx; const struct iwl_hcmd_arr *command_groups; int command_groups_size; - + u32 sdio_adma_addr; }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index fb6d341d6f3d..ab467cb9b97c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -287,16 +287,13 @@ enum iwl_rx_mpdu_status { IWL_RX_MPDU_STATUS_KEY_ERROR = BIT(4), IWL_RX_MPDU_STATUS_ICV_OK = BIT(5), IWL_RX_MPDU_STATUS_MIC_OK = BIT(6), - /* TODO - verify this is the correct value */ IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7), IWL_RX_MPDU_STATUS_SEC_MASK = 0x7 << 8, IWL_RX_MPDU_STATUS_SEC_NONE = 0x0 << 8, IWL_RX_MPDU_STATUS_SEC_WEP = 0x1 << 8, IWL_RX_MPDU_STATUS_SEC_CCM = 0x2 << 8, IWL_RX_MPDU_STATUS_SEC_TKIP = 0x3 << 8, - /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */ IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8, - /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */ IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8, IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11), IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 89ea70deeb84..6bf2bde239a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -392,7 +392,6 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), }; - /* this forward declaration can avoid to export the function */ static void iwl_mvm_async_handlers_wk(struct work_struct *wk); static void iwl_mvm_d0i3_exit_work(struct work_struct *wk); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index c499345ba526..837a7d536874 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -34,7 +34,6 @@ #include #include #include -#include #include "iwl-debug.h" #include "iwl-csr.h" -- cgit 
v1.2.3 From bdf408eca877a2ae310e3df9f05e865ffe10e4de Mon Sep 17 00:00:00 2001 From: Hubert Tarasiuk Date: Sun, 24 Jan 2016 13:35:06 +0100 Subject: iwlwifi: dvm: handle zero brightness for wifi LED In order to have the LED being OFF constantly when the brightness is set to 0, we need to pass IWL_LED_SOLID to iwl_led_cmd as the off parameter, otherwise the led will stay on constantly. This fixes https://bugzilla.kernel.org/show_bug.cgi?id=110551 Signed-off-by: Hubert Tarasiuk [reworked the commit message] Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/dvm/led.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c index 1aabb5ec096f..1bbd17ada974 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c @@ -152,11 +152,14 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev, { struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led); unsigned long on = 0; + unsigned long off = 0; if (brightness > 0) on = IWL_LED_SOLID; + else + off = IWL_LED_SOLID; - iwl_led_cmd(priv, on, 0); + iwl_led_cmd(priv, on, off); } static int iwl_led_blink_set(struct led_classdev *led_cdev, -- cgit v1.2.3 From d1cae0fd22e9d0c55c97da29dcdfe28d99d96ea6 Mon Sep 17 00:00:00 2001 From: Rodrigo Freire Date: Wed, 6 Jan 2016 20:24:01 -0200 Subject: iwlwifi: Document missing module options This patch documents two missing module options in the internal code comment block. Signed-off-by: Rodrigo Freire Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index fd42f63f5e84..b88ecc7892a9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -108,6 +108,8 @@ enum iwl_amsdu_size { * @power_level: power level, default = 1 * @debug_level: levels are IWL_DL_* * @ant_coupling: antenna coupling in dB, default = 0 + * @nvm_file: specifies a external NVM file + * @uapsd_disable: disable U-APSD, default = 1 * @d0i3_disable: disable d0i3, default = 1, * @d0i3_entry_delay: time to wait after no refs are taken before * entering D0i3 (in msecs) -- cgit v1.2.3 From 25657fec94f87308da19bb3d5063feef04d5ba20 Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Wed, 2 Sep 2015 12:34:23 +0300 Subject: iwlwifi: mvm: add trigger for firmware dump upon TX response status This will allow to collect the data when the firmware sends a specific tx response status. Signed-off-by: Golan Ben-Ami Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/iwl-fw-error-dump.h | 3 ++ drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 13 +++++++++ drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 33 ++++++++++++++++++++++ 3 files changed, 49 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h index a5aaf6853704..8425e1a587d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h @@ -293,6 +293,8 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data) * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a * threshold. 
* @FW_DBG_TDLS: trigger log collection upon TDLS related events. + * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when + * the firmware sends a tx reply. */ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_INVALID = 0, @@ -309,6 +311,7 @@ enum iwl_fw_dbg_trigger { FW_DBG_TRIGGER_BA, FW_DBG_TRIGGER_TX_LATENCY, FW_DBG_TRIGGER_TDLS, + FW_DBG_TRIGGER_TX_STATUS, /* must be last */ FW_DBG_TRIGGER_MAX, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 2273908ad83a..4050b4af7e45 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -749,6 +749,19 @@ struct iwl_fw_dbg_trigger_tdls { u8 reserved[4]; } __packed; +/** + * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response + * status. + * @statuses: the list of statuses to trigger the collection on + */ +struct iwl_fw_dbg_trigger_tx_status { + struct tx_status { + u8 status; + u8 reserved[3]; + } __packed statuses[16]; + __le32 reserved[2]; +} __packed; + /** * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration. * @id: conf id diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8bf48a7d0f4e..5d73db2534a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -736,6 +736,37 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags, iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r); } +static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm, + u32 status) +{ + struct iwl_fw_dbg_trigger_tlv *trig; + struct iwl_fw_dbg_trigger_tx_status *status_trig; + int i; + + if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS)) + return; + + trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS); + status_trig = (void *)trig->data; + + if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig)) + return; + + for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) { + /* don't collect on status 0 */ + if (!status_trig->statuses[i].status) + break; + + if (status_trig->statuses[i].status != (status & TX_STATUS_MSK)) + continue; + + iwl_mvm_fw_dbg_collect_trig(mvm, trig, + "Tx status %d was received", + status & TX_STATUS_MSK); + break; + } +} + static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { @@ -784,6 +815,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + iwl_mvm_tx_status_check_trigger(mvm, status); + info->status.rates[0].count = tx_resp->failure_frame + 1; iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate), info); -- cgit v1.2.3 From ee95ed3728cb3ed70b76fb770fff66133ff8b1fa Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 25 Nov 2015 13:17:10 +0200 Subject: iwlwifi: mvm: Add P2P client snoozing Enable snoozing and U-APSD on P2P client. The firwmare will support this only if the BSS vif is not associated. Make this configurable by a constant variable and disable it by default. 
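A minimal sketch of the gating rule described above, using a simplified interface array instead of the mac80211 interface iterator the driver really uses; every type name and the compile-time constant below are illustrative stand-ins, with the constant left at 0 to mirror the disabled-by-default behaviour:

#include <stdbool.h>
#include <stdio.h>

/* Illustration only: simplified model of the "standalone P2P client" rule. */
enum iftype { IF_STATION, IF_AP, IF_P2P_GO };

struct iface {
        enum iftype type;
        bool assoc;             /* meaningful for stations only */
};

#define P2P_UAPSD_STANDALONE 0  /* compile-time default: feature disabled */

static bool p2p_uapsd_allowed(bool fw_has_capability,
                              const struct iface *ifs, int n)
{
        int i;

        if (!fw_has_capability || !P2P_UAPSD_STANDALONE)
                return false;

        /* the P2P client must be "standalone": no AP/GO interface and no
         * associated BSS station on the same device */
        for (i = 0; i < n; i++) {
                if (ifs[i].type == IF_AP || ifs[i].type == IF_P2P_GO)
                        return false;
                if (ifs[i].type == IF_STATION && ifs[i].assoc)
                        return false;
        }
        return true;
}

int main(void)
{
        struct iface ifs[] = { { IF_STATION, false } };

        /* with the default constant of 0 this prints "not allowed" */
        printf("U-APSD on P2P client: %s\n",
               p2p_uapsd_allowed(true, ifs, 1) ? "allowed" : "not allowed");
        return 0;
}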
Signed-off-by: Avri Altman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 8 +++++ drivers/net/wireless/intel/iwlwifi/mvm/power.c | 39 ++++++++++++++++++++-- 4 files changed, 47 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 4050b4af7e45..dbf08c493654 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -302,6 +302,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics + * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different * sources for the MCC. This TLV bit is a future replacement to @@ -336,6 +337,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = (__force iwl_ucode_tlv_capa_t)19, IWL_UCODE_TLV_CAPA_CSUM_SUPPORT = (__force iwl_ucode_tlv_capa_t)21, IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS = (__force iwl_ucode_tlv_capa_t)22, + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = (__force iwl_ucode_tlv_capa_t)26, IWL_UCODE_TLV_CAPA_BT_COEX_PLCR = (__force iwl_ucode_tlv_capa_t)28, IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC = (__force iwl_ucode_tlv_capa_t)29, IWL_UCODE_TLV_CAPA_BT_COEX_RRC = (__force iwl_ucode_tlv_capa_t)30, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index b00c03fcd447..959fc4d33d16 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -73,6 +73,7 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ +#define IWL_MVM_P2P_UAPSD_STANDALONE 0 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5f3ac8cccf49..5a92ab1b3787 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1005,6 +1005,14 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm) IWL_MVM_BT_COEX_MPLUT; } +static inline +bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) && + IWL_MVM_P2P_UAPSD_STANDALONE; +} + static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { /* firmware flag isn't defined yet */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 9de159f1ef2d..16b3e36fb1ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -259,6 +259,26 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } +static void 
iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + bool *is_p2p_standalone = _data; + + switch (ieee80211_vif_type_p2p(vif)) { + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_AP: + *is_p2p_standalone = false; + break; + case NL80211_IFTYPE_STATION: + if (vif->bss_conf.assoc) + *is_p2p_standalone = false; + break; + + default: + break; + } +} + static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -268,9 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, ETH_ALEN)) return false; - if (vif->p2p && - !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD)) - return false; /* * Avoid using uAPSD if P2P client is associated to GO that uses * opportunistic power save. This is due to current FW limitation. @@ -287,6 +304,22 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, if (iwl_mvm_phy_ctx_count(mvm) >= 2) return false; + if (vif->p2p) { + /* Allow U-APSD only if p2p is stand alone */ + bool is_p2p_standalone = true; + + if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) + return false; + + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_p2p_standalone_iterator, + &is_p2p_standalone); + + if (!is_p2p_standalone) + return false; + } + return true; } -- cgit v1.2.3 From 33efe947e797c0f26b81bfe1b1eaf53501a6cd8e Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Wed, 23 Dec 2015 17:53:27 +0200 Subject: iwlwifi: mvm: make collecting fw debug data optional Slow platforms may have issues with dumping data upon firmware assert. Make it easier to disable it for those platform. Signed-off-by: Golan Ben-Ami Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 7 +++++-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 4 ++++ 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 959fc4d33d16..4b560e4417ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -6,7 +6,8 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +33,8 @@ * BSD LICENSE * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2015 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -108,6 +110,7 @@ #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_SW_TX_CSUM_OFFLOAD 0 +#define IWL_MVM_COLLECT_FW_ERR_DUMP 1 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW 1 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 0813f8184e10..94ec3f0c15cf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -435,6 +435,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) bool monitor_dump_only = false; int i; + if (!IWL_MVM_COLLECT_FW_ERR_DUMP && + !mvm->trans->dbg_dest_tlv) + return; + lockdep_assert_held(&mvm->mutex); /* there's no point in fw dump if the bus is dead */ -- cgit v1.2.3 From d8f7c5115d09e4b6b72b5b33414853aaa39bf837 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 14 Jan 2016 10:46:38 +0100 Subject: iwlwifi: mvm: remove shadowing variable The outer scope has a perfectly suitable 'i' variable, use it instead of adding a shadowing one in the inner scope. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 94ec3f0c15cf..4856eac120f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c @@ -644,8 +644,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm) /* Dump fw's virtual image */ if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) { - u32 i; - for (i = 1; i < mvm->num_of_paging_blk + 1; i++) { struct iwl_fw_error_dump_paging *paging; struct page *pages = -- cgit v1.2.3 From 27e070d3d3c2f6ab925ffd7c54814cabd95db51c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 14 Jan 2016 10:55:22 +0100 Subject: iwlwifi: mvm: fix debugfs signedness warning Using kstrtouint() with a signed int isn't really right, use kstrotoint() instead. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 90500e2d107b..8059efafcb80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -983,7 +983,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm, trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) return -EOPNOTSUPP; - ret = kstrtouint(buf, 0, &rec_mode); + ret = kstrtoint(buf, 0, &rec_mode); if (ret) return ret; -- cgit v1.2.3 From 7869318e4296e27d4cb97505e8739d62b48b2d58 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Sun, 27 Dec 2015 13:45:42 +0200 Subject: iwlwifi: mvm: add support for negative temperatures The driver should support also negative temperatures. So there is a need to separate between the return value and temperature in order to be able to distinguish between a negative temperature and error value. 
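A self-contained sketch of why the interface change is needed, with made-up helper names rather than the real iwl_mvm_get_temp(): once readings below zero are legal, a single signed return value cannot tell a valid -12 C apart from an error code, so the reading has to travel through an out parameter while the return value carries only success or failure:

#include <stdio.h>

/* Illustration only -- a made-up sensor, not the iwlwifi firmware interface. */
static int sensor_read(void)
{
        return -12;     /* a perfectly valid reading of -12 C */
}

/* Old-style API: the return value is both the temperature and the error code,
 * so a reading of -12 C is indistinguishable from an error of -12. */
static int get_temp_ambiguous(void)
{
        return sensor_read();
}

/* New-style API (the pattern the patch switches to): the return value only
 * reports success or failure, the reading comes back via an out parameter. */
static int get_temp(int *temp)
{
        *temp = sensor_read();
        return 0;       /* 0 on success, negative errno on failure */
}

int main(void)
{
        int temp, ret;

        if (get_temp_ambiguous() < 0)
                printf("old API: -12 C looks like an error\n");

        ret = get_temp(&temp);
        if (!ret)
                printf("new API: reading is %d C, no ambiguity\n", temp);
        return 0;
}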
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 9 +++++---- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 17 ++++++++--------- 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 8059efafcb80..5c0f93997b7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -261,17 +261,18 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file, { struct iwl_mvm *mvm = file->private_data; char buf[16]; - int pos, temp; + int pos, ret; + s32 temp; if (!mvm->ucode_loaded) return -EIO; mutex_lock(&mvm->mutex); - temp = iwl_mvm_get_temp(mvm); + ret = iwl_mvm_get_temp(mvm, &temp); mutex_unlock(&mvm->mutex); - if (temp < 0) - return temp; + if (ret) + return -EIO; pos = scnprintf(buf , sizeof(buf), "%d\n", temp); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5a92ab1b3787..216c8d6275a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1489,7 +1489,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm); void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); -int iwl_mvm_get_temp(struct iwl_mvm *mvm); +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index fb76004eede4..758d05a8c6aa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -194,12 +194,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd); } -int iwl_mvm_get_temp(struct iwl_mvm *mvm) +int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp) { struct iwl_notification_wait wait_temp_notif; static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE) }; - int ret, temp; + int ret; if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR)) temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION; @@ -208,7 +208,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif, temp_notif, ARRAY_SIZE(temp_notif), - iwl_mvm_temp_notif_wait, &temp); + iwl_mvm_temp_notif_wait, temp); ret = iwl_mvm_get_temp_cmd(mvm); if (ret) { @@ -219,12 +219,10 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm) ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif, IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT); - if (ret) { + if (ret) IWL_ERR(mvm, "Getting the temperature timed out\n"); - return ret; - } - return temp; + return ret; } static void check_exit_ctkill(struct work_struct *work) @@ -233,6 +231,7 @@ static void check_exit_ctkill(struct work_struct *work) struct iwl_mvm *mvm; u32 duration; s32 temp; + int ret; tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); @@ -250,13 +249,13 @@ static void check_exit_ctkill(struct work_struct *work) goto reschedule; } - temp = iwl_mvm_get_temp(mvm); + ret = iwl_mvm_get_temp(mvm, &temp); iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL); 
__iwl_mvm_mac_stop(mvm); - if (temp < 0) + if (ret) goto reschedule; IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp); -- cgit v1.2.3 From 0db056d32490181be48efc2ca7cfa7494b13e6b5 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 29 Dec 2015 11:07:15 +0200 Subject: iwlwifi: mvm: support beacon storing Currently firmware is configured to filter out beacons. In case a beacon was changed - it is waking the host. However, some vendors change their IEs frequently without any significant change, and redundant wakeups are triggered as a result. As a solution disable beacon filtering when entering d0i3. Instead, firmware will store the latest beacon and upon exiting d0i3 it will send it up to the host, so the host can act upon changes (if there were any). This beacon will arrive as a dedicated notification - support it as well. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 3 ++ drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 3 +- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 29 +++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 41 ++++++++++++++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 22 ++++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 33 ++++++++++++----- 8 files changed, 120 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index dbf08c493654..5152987c8b8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -315,6 +315,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what * antenna the beacon should be transmitted + * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon + * from AP and will send it upon d0i3 exit. 
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used @@ -346,6 +348,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, + IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, NUM_IWL_UCODE_TLV_CAPA diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 78572ef89b26..346376187ef8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -853,7 +853,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; wowlan_config_cmd->flags = ENABLE_L3_FILTERING | - ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; + ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | + ENABLE_STORE_BEACON; /* Query the last used seqno and set it */ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h index 62b9a0a96700..eec52c57f718 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h @@ -251,6 +251,7 @@ enum iwl_wowlan_flags { ENABLE_L3_FILTERING = BIT(1), ENABLE_NBNS_FILTERING = BIT(2), ENABLE_DHCP_FILTERING = BIT(3), + ENABLE_STORE_BEACON = BIT(4), }; struct iwl_wowlan_config_cmd { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index 82049bb139c2..b6b57273b8ba 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -280,11 +280,16 @@ enum iwl_phy_ops_subcmd_ids { DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; +enum iwl_prot_offload_subcmd_ids { + STORED_BEACON_NTF = 0xFF, +}; + /* command groups */ enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, PHY_OPS_GROUP = 0x4, + PROT_OFFLOAD_GROUP = 0xb, }; /** @@ -1851,4 +1856,28 @@ struct iwl_shared_mem_cfg { __le32 page_buff_size; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ +#define MAX_STORED_BEACON_SIZE 600 + +/** + * Stored beacon notification + * + * @system_time: system time on air rise + * @tsf: TSF on air rise + * @beacon_timestamp: beacon on air rise + * @phy_flags: general phy flags: band, modulation, etc. + * @channel: channel this beacon was received on + * @rates: rate in ucode internal format + * @byte_count: frame's byte count + */ +struct iwl_stored_beacon_notif { + __le32 system_time; + __le64 tsf; + __le32 beacon_timestamp; + __le16 phy_flags; + __le16 channel; + __le32 rates; + __le32 byte_count; + u8 data[MAX_STORED_BEACON_SIZE]; +} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index bf1e5eb5dbdb..62927f567afc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1462,3 +1462,40 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, iwl_mvm_beacon_loss_iterator, mb); } + +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_stored_beacon_notif *sb = (void *)pkt->data; + struct ieee80211_rx_status rx_status; + struct sk_buff *skb; + u32 size = le32_to_cpu(sb->byte_count); + + if (size == 0) + return; + + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + + /* update rx_status according to the notification's metadata */ + memset(&rx_status, 0, sizeof(rx_status)); + rx_status.mactime = le64_to_cpu(sb->tsf); + rx_status.device_timestamp = le32_to_cpu(sb->system_time); + rx_status.band = + (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? + IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ; + rx_status.freq = + ieee80211_channel_to_frequency(le16_to_cpu(sb->channel), + rx_status.band); + + /* copy the data */ + memcpy(skb_put(skb, size), sb->data, size); + memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); + + /* pass it as regular rx to mac80211 */ + ieee80211_rx_napi(mvm->hw, skb, NULL); +} diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 216c8d6275a1..b45cf4f63781 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1192,6 +1192,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 6bf2bde239a0..3c869ad6aaec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -33,6 +33,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -267,6 +268,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { true), RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), + RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, + iwl_mvm_rx_stored_beacon_notif, false), }; #undef RX_HANDLER @@ -386,10 +389,18 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = { + HCMD_NAME(STORED_BEACON_NTF), +}; + static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), + [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), }; /* this forward declaration can avoid to export the function */ @@ -1195,7 +1206,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm, cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; cmd->offloading_tid = iter_data->offloading_tid; cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING | - ENABLE_DHCP_FILTERING; + ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON; /* * The d0i3 uCode takes care of the nonqos counters, * so configure only the qos seq ones. @@ -1216,8 +1227,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) struct iwl_wowlan_config_cmd wowlan_config_cmd = { .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME | IWL_WOWLAN_WAKEUP_BEACON_MISS | - IWL_WOWLAN_WAKEUP_LINK_CHANGE | - IWL_WOWLAN_WAKEUP_BCN_FILTERING), + IWL_WOWLAN_WAKEUP_LINK_CHANGE), }; struct iwl_d3_manager_config d3_cfg_cmd = { .min_sleep_time = cpu_to_le32(1000), @@ -1267,6 +1277,12 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode) /* configure wowlan configuration only if needed */ if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) { + /* wake on beacons only if beacon storing isn't supported */ + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_STORING)) + wowlan_config_cmd.wakeup_filter |= + cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING); + iwl_mvm_wowlan_config_key_params(mvm, d0i3_iter_data.connected_vif, true, flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 16b3e36fb1ba..0d03b3e3cf4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -903,9 +903,9 @@ static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); } -int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - u32 flags) +static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags, bool d0i3) { struct iwl_beacon_filter_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -916,12 +916,20 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags); - if (!ret) + /* don't change bf_enabled in case of temporary d0i3 configuration */ + if (!ret && !d0i3) mvmvif->bf_data.bf_enabled = false; return ret; } +int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + u32 flags) +{ + return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false); +} + static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) { bool disable_ps; @@ -1058,8 +1066,17 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, IWL_BF_CMD_CONFIG_D0I3, .bf_enable_beacon_filter = cpu_to_le32(1), }; - ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, - flags, true); + /* + * When beacon storing is supported - disable beacon filtering + * altogether - the latest beacon will be sent when exiting d0i3 + */ + if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_BEACON_STORING)) + ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags, + true); + else + ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf, + flags, true); } else { if (mvmvif->bf_data.bf_enabled) ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags); -- cgit v1.2.3 From b525d08125e61e262be63ea250e3e13e93b6a92a Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 6 Jan 2016 10:01:41 +0100 Subject: iwlwifi: mvm: track low-latency sources separately To be able to test low-latency behaviour properly, split the different low-latency sources so that setting any one of them, for example from debugfs, is sufficient; this avoids getting the debug setting overwritten by other sources. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 19 ++++++++++++++----- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 12 +++++++----- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 6 ++---- 3 files changed, 23 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 9e0d46368cdd..c286a5f8d8bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -1255,6 +1257,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = mvmvif->mvm; + bool prev; u8 value; int ret; @@ -1265,7 +1268,9 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf, return -EINVAL; mutex_lock(&mvm->mutex); - iwl_mvm_update_low_latency(mvm, vif, value); + prev = iwl_mvm_vif_low_latency(mvmvif); + mvmvif->low_latency_dbgfs = value; + iwl_mvm_update_low_latency(mvm, vif, prev); mutex_unlock(&mvm->mutex); return count; @@ -1277,11 +1282,15 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, { struct ieee80211_vif *vif = file->private_data; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - char buf[2]; + char buf[30] = {}; + int len; - buf[0] = mvmvif->low_latency ? '1' : '0'; - buf[1] = '\n'; - return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); + len = snprintf(buf, sizeof(buf) - 1, + "traffic=%d\ndbgfs=%d\nvcmd=%d\n", + mvmvif->low_latency_traffic, + mvmvif->low_latency_dbgfs, + mvmvif->low_latency_vcmd); + return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b45cf4f63781..418aeffd9bab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -346,8 +346,9 @@ struct iwl_mvm_vif_bf_data { * @pm_enabled - Indicate if MAC power management is allowed * @monitor_active: indicates that monitor context is configured, and that the * interface should get quota etc. - * @low_latency: indicates that this interface is in low-latency mode - * (VMACLowLatencyMode) + * @low_latency_traffic: indicates low latency traffic was detected + * @low_latency_dbgfs: low latency mode set from debugfs + * @low_latency_vcmd: low latency mode set from vendor command * @ps_disabled: indicates that this interface requires PS to be disabled * @queue_params: QoS params for this MAC * @bcast_sta: station used for broadcast packets. Used by the following @@ -375,7 +376,7 @@ struct iwl_mvm_vif { bool ap_ibss_active; bool pm_enabled; bool monitor_active; - bool low_latency; + bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd; bool ps_disabled; struct iwl_mvm_vif_bf_data bf_data; @@ -1427,8 +1428,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) * binding, so this has no real impact. For now, just return * the current desired low-latency state. 
*/ - - return mvmvif->low_latency; + return mvmvif->low_latency_dbgfs || + mvmvif->low_latency_traffic || + mvmvif->low_latency_vcmd; } /* hw scheduler queue config */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 3a989f5c20db..59453c176580 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -937,18 +937,16 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) } int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - bool value) + bool prev) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int res; lockdep_assert_held(&mvm->mutex); - if (mvmvif->low_latency == value) + if (iwl_mvm_vif_low_latency(mvmvif) == prev) return 0; - mvmvif->low_latency = value; - res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) return res; -- cgit v1.2.3 From a80c1cf9b4b2937cfe782aba7b54a5144fe7eec2 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 13 Jan 2016 15:01:00 +0100 Subject: iwlwifi: mvm: support setting minimum quota from debugfs For debug purposes, allow setting minimum quota (for a single virtual interface) from debugfs. This is an absolute minimum, so it can only be set up to 95%. Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 56 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/quota.c | 16 +++++++ 3 files changed, 73 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index c286a5f8d8bd..14004456bf55 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -1372,6 +1372,59 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf)); } +static void iwl_dbgfs_quota_check(void *data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + int *ret = data; + + if (mvmvif->dbgfs_quota_min) + *ret = -EINVAL; +} + +static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u16 value; + int ret; + + ret = kstrtou16(buf, 0, &value); + if (ret) + return ret; + + if (value > 95) + return -EINVAL; + + mutex_lock(&mvm->mutex); + + mvmvif->dbgfs_quota_min = 0; + ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_dbgfs_quota_check, &ret); + if (ret == 0) { + mvmvif->dbgfs_quota_min = value; + iwl_mvm_update_quotas(mvm, false, NULL); + } + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + +static ssize_t iwl_dbgfs_quota_min_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + char buf[10]; + int len; + + len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ @@ -1395,6 +1448,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 
32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32); MVM_DEBUGFS_READ_FILE_OPS(tof_range_response); MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { @@ -1432,6 +1486,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif) S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, S_IRUSR | S_IWUSR); + MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, + S_IRUSR | S_IWUSR); if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p && mvmvif == mvm->bf_allowed_vif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 418aeffd9bab..5a34808fdeed 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -433,6 +433,7 @@ struct iwl_mvm_vif { struct iwl_dbgfs_pm dbgfs_pm; struct iwl_dbgfs_bf dbgfs_bf; struct iwl_mac_power_cmd mac_pwr_cmd; + int dbgfs_quota_min; #endif enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c index 0b762b4f8fad..2141db5bff82 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -74,6 +76,9 @@ struct iwl_mvm_quota_iterator_data { int n_interfaces[MAX_BINDINGS]; int colors[MAX_BINDINGS]; int low_latency[MAX_BINDINGS]; +#ifdef CONFIG_IWLWIFI_DEBUGFS + int dbgfs_min[MAX_BINDINGS]; +#endif int n_low_latency_bindings; struct ieee80211_vif *disabled_vif; }; @@ -129,6 +134,12 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac, data->n_interfaces[id]++; +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->dbgfs_quota_min) + data->dbgfs_min[id] = max(data->dbgfs_min[id], + mvmvif->dbgfs_quota_min); +#endif + if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) { data->n_low_latency_bindings++; data->low_latency[id] = true; @@ -259,6 +270,11 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, if (data.n_interfaces[i] <= 0) cmd.quotas[idx].quota = cpu_to_le32(0); +#ifdef CONFIG_IWLWIFI_DEBUGFS + else if (data.dbgfs_min[i]) + cmd.quotas[idx].quota = + cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100); +#endif else if (data.n_low_latency_bindings == 1 && n_non_lowlat && data.low_latency[i]) /* -- cgit v1.2.3 From 0c1c6e37137fc2333ef9898c24ea0cb9efbd8df0 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 16 Dec 2015 21:17:06 +0200 Subject: iwlwifi: mvm: change access to ieee80211_hdr Make the code clearer. 
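The rxmq.c change above is purely cosmetic: both expressions address the first byte after the RX descriptor. A small stand-alone demonstration with stand-in structures (the real ones are iwl_rx_packet and iwl_rx_mpdu_desc):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in types for the illustration. */
struct desc { uint32_t a, b; };
struct pkt  { struct desc d; uint8_t payload[8]; };

int main(void)
{
        struct pkt p;
        uint8_t *data = (uint8_t *)&p;
        struct desc *desc = (struct desc *)data;

        /* old form: pointer arithmetic on the descriptor type */
        void *hdr_old = (void *)(desc + 1);
        /* new form: byte arithmetic on the packet data -- same address */
        void *hdr_new = (void *)(data + sizeof(*desc));

        assert(hdr_old == hdr_new);
        printf("both point %zu bytes past the start of the packet\n",
               sizeof(*desc));
        return 0;
}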
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 0c073e02fd4c..4cce37238a08 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -294,7 +294,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; - struct ieee80211_hdr *hdr = (void *)(desc + 1); + struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); u32 len = le16_to_cpu(desc->mpdu_len); u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); struct ieee80211_sta *sta = NULL; -- cgit v1.2.3 From 1c4e15a23d27e2efd56d2b9b7436bff060f126ef Mon Sep 17 00:00:00 2001 From: Max Stepanov Date: Mon, 4 Jan 2016 11:09:14 +0200 Subject: iwlwifi: mvm: add debug print if scan config is ignored Print a debug message in iwl_mvm_config_scan() if a scan configuration data is decided not to be sent to FW. Signed-off-by: Max Stepanov Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 0476e7688e9b..1e1ab9daaec9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -930,8 +930,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm) if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels)) return -ENOBUFS; - if (type == mvm->scan_type) + if (type == mvm->scan_type) { + IWL_DEBUG_SCAN(mvm, + "Ignoring UMAC scan config of the same type\n"); return 0; + } cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels; -- cgit v1.2.3 From 837c4da98481d4e504b2aba077c8528fee1b6d13 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 7 Jan 2016 16:50:45 +0200 Subject: iwlwifi: mvm: change the check for ADD_STA status The firmware will return the baid for BA session in the ADD_STA command response. This requires masking the check of the status, which is actually only 8 bits, and not the whole 32 bits. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 3 +++ drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 10 +++++----- 2 files changed, 8 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 6fca4fb1d306..1eb3983decc3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -253,6 +253,9 @@ struct iwl_mvm_keyinfo { __le64 hw_tkip_mic_tx_key; } __packed; +#define IWL_ADD_STA_STATUS_MASK 0xFF +#define IWL_ADD_STA_BAID_MASK 0xFF00 + /** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. 
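A compact illustration of the masking this patch introduces, using the same mask values but a made-up BAID and an illustrative success code: once the upper bits of the 32-bit reply carry the BAID, comparing the raw word against the success code would misreport failures, while masking recovers the 8-bit status and the BAID independently:

#include <stdio.h>
#include <stdint.h>

/* Same masks the patch adds; the status and BAID values below are made up
 * purely for the demonstration. */
#define ADD_STA_STATUS_MASK     0x00FF
#define ADD_STA_BAID_MASK       0xFF00
#define ADD_STA_SUCCESS         0x1     /* illustrative value */

int main(void)
{
        /* firmware reply carrying both a success status and a BAID of 5 */
        uint32_t status = ADD_STA_SUCCESS | (5 << 8);

        /* comparing the raw 32-bit word no longer works once bits 8..15
         * are used for the BAID */
        printf("raw compare:   %s\n",
               status == ADD_STA_SUCCESS ? "success" : "misread as failure");

        /* masking recovers the 8-bit status and the BAID separately */
        printf("masked status: %s\n",
               (status & ADD_STA_STATUS_MASK) == ADD_STA_SUCCESS ?
               "success" : "failure");
        printf("baid:          %u\n",
               (unsigned)((status & ADD_STA_BAID_MASK) >> 8));
        return 0;
}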
* ( REPLY_ADD_STA = 0x18 ) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b556e33658d7..84b7fd1c4c70 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -192,7 +192,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); break; @@ -362,7 +362,7 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", mvmsta->sta_id); @@ -628,7 +628,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "Internal station added.\n"); return 0; @@ -851,7 +851,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n", start ? "start" : "stopp"); @@ -909,7 +909,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (ret) return ret; - switch (status) { + switch (status & IWL_ADD_STA_STATUS_MASK) { case ADD_STA_SUCCESS: break; default: -- cgit v1.2.3 From 81f02ba3dbe0c9133dc4490f0a7d3acd99a89aad Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 30 Dec 2015 23:58:29 +0200 Subject: iwlwifi: mvm: add tlv for multi queue rx support Previous patches enabled the multi-queue rx path based on iwl_mvm_has_new_rx_api() which returned false by default. Change it to return the actual value based on the firmware TLV which is now defined. 
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 5152987c8b8d..e2dbc67a367b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -347,6 +347,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64, IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = (__force iwl_ucode_tlv_capa_t)65, IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = (__force iwl_ucode_tlv_capa_t)67, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = (__force iwl_ucode_tlv_capa_t)68, IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 5a34808fdeed..f87aa972fdce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1017,8 +1017,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) { - /* firmware flag isn't defined yet */ - return false; + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } extern const u8 iwl_mvm_ac_to_tx_fifo[]; -- cgit v1.2.3 From c257d5fb52872213f33016c0750bc2334a781700 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Sun, 24 Jan 2016 15:28:43 +0100 Subject: iwlwifi: treat iwl_parse_nvm_data() MAC addr as little endian The MAC address parameters passed to iwl_parse_nvm_data() are passed on to iwl_set_hw_address_family_8000() which treats them as little endian. Annotate them as such, and add the missing byte-swapping in mvm. While at it, add the MAC address to the error to make debugging issues with it easier. 
Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 7 ++++--- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 8 +++++--- 3 files changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 7b89bfc8c8ac..50f4cc60cf3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -539,7 +539,7 @@ static void iwl_set_hw_address_family_8000(struct device *dev, struct iwl_nvm_data *data, const __le16 *mac_override, const __le16 *nvm_hw, - u32 mac_addr0, u32 mac_addr1) + __le32 mac_addr0, __le32 mac_addr1) { const u8 *hw_addr; @@ -583,7 +583,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev, if (!is_valid_ether_addr(data->hw_addr)) IWL_ERR_DEV(dev, - "mac address from hw section is not valid\n"); + "mac address (%pM) from hw section is not valid\n", + data->hw_addr); return; } @@ -597,7 +598,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1) + __le32 mac_addr0, __le32 mac_addr1) { struct iwl_nvm_data *data; u32 sku; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 92466ee72806..4e8e0dc474d4 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - u32 mac_addr0, u32 mac_addr1); + __le32 mac_addr0, __le32 mac_addr1); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 7a3da2da6fd0..c446e0da9789 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -300,7 +300,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; - u32 mac_addr0, mac_addr1; + __le32 mac_addr0, mac_addr1; /* Checking for required sections */ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { @@ -337,8 +337,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) return NULL; /* read the mac address from WFMP registers */ - mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0); - mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1); + mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, + WFMP_MAC_ADDR_0)); + mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, + WFMP_MAC_ADDR_1)); hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; -- cgit v1.2.3 From 8812182e5df31e49cd888bafa36bcccf87787940 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Sun, 24 Jan 2016 15:18:59 +0200 Subject: iwlwifi: mvm: Remove bf_vif from iwl_power_vifs This member is actually not needed as beacon abort is only allowed for a bss station. 
Signed-off-by: Avri Altman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 28 ++++++++++++-------------- 1 file changed, 13 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 0d03b3e3cf4e..83e686dd86ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -577,7 +577,6 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm, struct iwl_power_vifs { struct iwl_mvm *mvm; - struct ieee80211_vif *bf_vif; struct ieee80211_vif *bss_vif; struct ieee80211_vif *p2p_vif; struct ieee80211_vif *ap_vif; @@ -650,11 +649,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac, if (mvmvif->phy_ctxt) if (mvmvif->phy_ctxt->id < MAX_PHYS) power_iterator->bss_active = true; - - if (mvmvif->bf_data.bf_enabled && - !WARN_ON(power_iterator->bf_vif)) - power_iterator->bf_vif = vif; - break; default: @@ -959,21 +953,19 @@ static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm) } static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, - struct iwl_power_vifs *vifs) + struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); bool ba_enable; - if (!vifs->bf_vif) + if (!mvmvif->bf_data.bf_enabled) return 0; - mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif); - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vifs->bf_vif->bss_conf.ps || + !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif)); - return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable); + return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) @@ -994,7 +986,10 @@ int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) if (ret) return ret; - return iwl_mvm_power_set_ba(mvm, &vifs); + if (vifs.bss_vif) + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); + + return 0; } int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) @@ -1029,7 +1024,10 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm) return ret; } - return iwl_mvm_power_set_ba(mvm, &vifs); + if (vifs.bss_vif) + return iwl_mvm_power_set_ba(mvm, vifs.bss_vif); + + return 0; } int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm, -- cgit v1.2.3 From 416ec4f3928544fcedba5a4d98866e74ce44f369 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Sun, 24 Jan 2016 15:56:11 +0200 Subject: iwlwifi: mvm: Remove iwl_mvm_update_beacon_abort It is only called from iwl_mvm_power_set_ba() so simplify things by removing it. 
Signed-off-by: Avri Altman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 37 +++++++++----------------- 1 file changed, 12 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 83e686dd86ee..f313910cd026 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -877,26 +877,6 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm, return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false); } -static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm, - struct ieee80211_vif *vif, - bool enable) -{ - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct iwl_beacon_filter_cmd cmd = { - IWL_BF_CMD_CONFIG_DEFAULTS, - .bf_enable_beacon_filter = cpu_to_le32(1), - }; - - if (!mvmvif->bf_data.bf_enabled) - return 0; - - if (mvm->cur_ucode == IWL_UCODE_WOWLAN) - cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); - - mvmvif->bf_data.ba_enabled = enable; - return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); -} - static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u32 flags, bool d0i3) @@ -956,16 +936,23 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - bool ba_enable; + struct iwl_beacon_filter_cmd cmd = { + IWL_BF_CMD_CONFIG_DEFAULTS, + .bf_enable_beacon_filter = cpu_to_le32(1), + }; if (!mvmvif->bf_data.bf_enabled) return 0; - ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled || - !vif->bss_conf.ps || - iwl_mvm_vif_low_latency(mvmvif)); + if (mvm->cur_ucode == IWL_UCODE_WOWLAN) + cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3); + + mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled || + mvm->ps_disabled || + !vif->bss_conf.ps || + iwl_mvm_vif_low_latency(mvmvif)); - return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable); + return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false); } int iwl_mvm_power_update_ps(struct iwl_mvm *mvm) -- cgit v1.2.3 From 854c57057390e45041a56c3d3228eb805e980c99 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 26 Jan 2016 13:17:47 +0200 Subject: iwlwifi: mvm: add new ADD_STA command version The 9000 hardware introduces the frame releaser, which keeps track of the aggregation window and notifies host of the window status. This requires in turn updating the hardware with the RX BA session window size. Firmware API was changed to enable that, update the driver accordingly. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- .../net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 66 +++++++++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 40 ++++++++++--- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 2 +- 4 files changed, 99 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h index 1eb3983decc3..90d911394836 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -256,6 +258,65 @@ struct iwl_mvm_keyinfo { #define IWL_ADD_STA_STATUS_MASK 0xFF #define IWL_ADD_STA_BAID_MASK 0xFF00 +/** + * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table. + * ( REPLY_ADD_STA = 0x18 ) + * @add_modify: 1: modify existing, 0: add new station + * @awake_acs: + * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable + * AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field. + * @mac_id_n_color: the Mac context this station belongs to + * @addr[ETH_ALEN]: station's MAC address + * @sta_id: index of station in uCode's station table + * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave + * alone. 1 - modify, 0 - don't change. + * @station_flags: look at %iwl_sta_flags + * @station_flags_msk: what of %station_flags have changed + * @add_immediate_ba_tid: tid for which to add block-ack support (Rx) + * Set %STA_MODIFY_ADD_BA_TID to use this field, and also set + * add_immediate_ba_ssn. + * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx) + * Set %STA_MODIFY_REMOVE_BA_TID to use this field + * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with + * add_immediate_ba_tid. + * @sleep_tx_count: number of packets to transmit to station even though it is + * asleep. Used to synchronise PS-poll and u-APSD responses while ucode + * keeps track of STA sleep state. + * @sleep_state_flags: Look at %iwl_sta_sleep_flag. + * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP + * mac-addr. + * @beamform_flags: beam forming controls + * @tfd_queue_msk: tfd queues used by this station + * + * The device contains an internal table of per-station information, with info + * on security keys, aggregation parameters, and Tx rates for initial Tx + * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD). + * + * ADD_STA sets up the table entry for one station, either creating a new + * entry, or modifying a pre-existing one. + */ +struct iwl_mvm_add_sta_cmd_v7 { + u8 add_modify; + u8 awake_acs; + __le16 tid_disable_tx; + __le32 mac_id_n_color; + u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */ + __le16 reserved2; + u8 sta_id; + u8 modify_mask; + __le16 reserved3; + __le32 station_flags; + __le32 station_flags_msk; + u8 add_immediate_ba_tid; + u8 remove_immediate_ba_tid; + __le16 add_immediate_ba_ssn; + __le16 sleep_tx_count; + __le16 sleep_state_flags; + __le16 assoc_id; + __le16 beamform_flags; + __le32 tfd_queue_msk; +} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + /** * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table. * ( REPLY_ADD_STA = 0x18 ) @@ -285,6 +346,7 @@ struct iwl_mvm_keyinfo { * mac-addr. 
* @beamform_flags: beam forming controls * @tfd_queue_msk: tfd queues used by this station + * @rx_ba_window: aggregation window size * * The device contains an internal table of per-station information, with info * on security keys, aggregation parameters, and Tx rates for initial Tx @@ -313,7 +375,9 @@ struct iwl_mvm_add_sta_cmd { __le16 assoc_id; __le16 beamform_flags; __le32 tfd_queue_msk; -} __packed; /* ADD_STA_CMD_API_S_VER_7 */ + __le16 rx_ba_window; + __le16 reserved; +} __packed; /* ADD_STA_CMD_API_S_VER_8 */ /** * struct iwl_mvm_add_sta_key_cmd - add/modify sta key diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d70a1716f3e0..01476f545695 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -884,10 +884,10 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = -EINVAL; break; } - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size); break; case IEEE80211_AMPDU_RX_STOP: - ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false); + ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size); break; case IEEE80211_AMPDU_TX_START: if (!iwl_enable_tx_ampdu(mvm->cfg)) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 84b7fd1c4c70..4854e79cbda8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -68,6 +70,18 @@ #include "sta.h" #include "rs.h" +/* + * New version of ADD_STA_sta command added new fields at the end of the + * structure, so sending the size of the relevant API's structure is enough to + * support both API versions. + */ +static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) +{ + return iwl_mvm_has_new_rx_api(mvm) ? 
+ sizeof(struct iwl_mvm_add_sta_cmd) : + sizeof(struct iwl_mvm_add_sta_cmd_v7); +} + static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype) { @@ -187,7 +201,8 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &add_sta_cmd, &status); if (ret) return ret; @@ -357,7 +372,8 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; @@ -623,7 +639,8 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, if (addr) memcpy(cmd.addr, addr, ETH_ALEN); - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; @@ -819,7 +836,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) #define IWL_MAX_RX_BA_SESSIONS 16 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start) + int tid, u16 ssn, bool start, u8 buf_size) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; @@ -839,6 +856,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (start) { cmd.add_immediate_ba_tid = (u8) tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); + cmd.rx_ba_window = cpu_to_le16((u16)buf_size); } else { cmd.remove_immediate_ba_tid = (u8) tid; } @@ -846,7 +864,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, STA_MODIFY_REMOVE_BA_TID; status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; @@ -904,7 +923,8 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); status = ADD_STA_SUCCESS; - ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd), + ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, + iwl_mvm_add_sta_cmd_size(mvm), &cmd, &status); if (ret) return ret; @@ -1640,7 +1660,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } @@ -1731,7 +1752,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, - sizeof(cmd), &cmd); + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } @@ -1766,7 +1787,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, }; int ret; - ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd); + ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, + iwl_mvm_add_sta_cmd_size(mvm), &cmd); if (ret) IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); } diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 39fdf5224e81..e3b9446ee995 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -401,7 +401,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start); + int tid, u16 ssn, bool start, u8 buf_size); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, -- cgit v1.2.3 From 6ad6c01fa74f649722eeca7308dc1fc4e43235cd Mon Sep 17 00:00:00 2001 From: Gregory Greenman Date: Wed, 27 Jan 2016 00:10:15 +0200 Subject: iwlwifi: mvm: rs: fix TPC action decision algorithm Decreasing Tx power is allowed only when success ratio is above the threshold defined in the algorithm. Add this condition. Signed-off-by: Gregory Greenman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 7bb6fd0e4391..986e12ab9dc2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2040,7 +2040,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm, } /* try decreasing first if applicable */ - if (weak != TPC_INVALID) { + if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) && + weak != TPC_INVALID) { if (weak_tpt == IWL_INVALID_VALUE && (strong_tpt == IWL_INVALID_VALUE || current_tpt >= strong_tpt)) { -- cgit v1.2.3 From 43413a975d06e5e34016751fac27e29ec3d4d10f Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 31 Dec 2015 11:49:18 +0200 Subject: iwlwifi: mvm: support rss queues configuration command 9000 series supports multi-queue rx. The hardware needs to be configured with the hash functions to perform and indirection table that maps hash results to the relevant CPUs\queues. Support this configuration. Add debugfs hook to configure the indirection table in order to enable performance analysis. The configuration is stateless, receives a partial or full pattern and sends the command to the firmware. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 44 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 33 +++++++++++++++- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 29 ++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 ++ 6 files changed, 111 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 5c0f93997b7b..005cc09757d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -943,6 +944,47 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf, return count; } +static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + int ret, i, num_repeats, nbytes = count / 2; + + ret = hex2bin(cmd.indirection_table, buf, nbytes); + if (ret) + return ret; + + /* + * The input is the redirection table, partial or full. + * Repeat the pattern if needed. + * For example, input of 01020F will be repeated 42 times, + * indirecting RSS hash results to queues 1, 2, 15 (skipping + * queues 3 - 14). + */ + num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes; + for (i = 1; i < num_repeats; i++) + memcpy(&cmd.indirection_table[i * nbytes], + cmd.indirection_table, nbytes); + /* handle cut in the middle pattern for the last places */ + memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, + ARRAY_SIZE(cmd.indirection_table) % nbytes); + + memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -1455,6 +1497,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16); #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); @@ -1499,6 +1542,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); if (!debugfs_create_bool("enable_scan_iteration_notif", S_IRUSR | S_IWUSR, mvm->debugfs_dir, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index ab467cb9b97c..b45c61e1f45c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -34,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -362,4 +362,33 @@ struct iwl_frame_release { __le16 nssn; }; +enum iwl_rss_hash_func_en { + IWL_RSS_HASH_TYPE_IPV4_TCP, + IWL_RSS_HASH_TYPE_IPV4_UDP, + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD, + IWL_RSS_HASH_TYPE_IPV6_TCP, + IWL_RSS_HASH_TYPE_IPV6_UDP, + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, +}; + +#define IWL_RSS_HASH_KEY_CNT 10 +#define IWL_RSS_INDIRECTION_TABLE_SIZE 128 +#define IWL_RSS_ENABLE 1 + +/** + * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration + * + * @flags: 1 - enable, 0 - disable + * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en + * @secret_key: 320 bit input of random key configuration from driver + * @indirection_table: indirection table + */ +struct iwl_rss_config_cmd { + __le32 flags; + u8 hash_mask; + u8 reserved[3]; + __le32 secret_key[IWL_RSS_HASH_KEY_CNT]; + u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; +} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ + #endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index b6b57273b8ba..f332497e29d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -213,6 +213,8 @@ enum { MFUART_LOAD_NOTIFICATION = 0xb1, + RSS_CONFIG_CMD = 0xb3, + REPLY_RX_PHY_CMD = 0xc0, REPLY_RX_MPDU_CMD = 0xc1, FRAME_RELEASE = 0xc3, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 4ed5180c547b..070e2af05ca2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -107,6 +108,24 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant) sizeof(tx_ant_cmd), &tx_ant_cmd); } +static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) +{ + int i; + struct iwl_rss_config_cmd cmd = { + .flags = cpu_to_le32(IWL_RSS_ENABLE), + .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP | + IWL_RSS_HASH_TYPE_IPV4_PAYLOAD | + IWL_RSS_HASH_TYPE_IPV6_TCP | + IWL_RSS_HASH_TYPE_IPV6_PAYLOAD, + }; + + for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) + cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; + memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); + + return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); +} + static void iwl_free_fw_paging(struct iwl_mvm *mvm) { int i; @@ -894,6 +913,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) goto error; + /* Init RSS configuration */ + if (iwl_mvm_has_new_rx_api(mvm)) { + ret = iwl_send_rss_cfg_cmd(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", + ret); + goto error; + } + } + /* init the fw <-> mac80211 STA mapping */ for (i = 0; i < IWL_MVM_STATION_COUNT; i++) RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f87aa972fdce..747f7eb80f47 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -647,6 +647,7 @@ struct iwl_mvm { atomic_t pending_frames[IWL_MVM_STATION_COUNT]; u32 
tfd_drained[IWL_MVM_STATION_COUNT]; u8 rx_ba_sessions; + u32 secret_key[IWL_RSS_HASH_KEY_CNT]; /* configured by mac80211 */ u32 rts_threshold; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 3c869ad6aaec..325ff8aa33f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -347,6 +347,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(MAC_PM_POWER_TABLE), HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION), HCMD_NAME(MFUART_LOAD_NOTIFICATION), + HCMD_NAME(RSS_CONFIG_CMD), HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC), HCMD_NAME(REPLY_RX_PHY_CMD), HCMD_NAME(REPLY_RX_MPDU_CMD), @@ -651,6 +652,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); + /* init RSS hash key */ + get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key)); + return op_mode; out_unregister: -- cgit v1.2.3 From bce977310416a2b6c9ad774dfe2071eaacd3d90b Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 25 Jan 2016 18:14:49 +0200 Subject: iwlwifi: pcie: enable multi-queue rx path Previous patches enabled new 9000 hardware DMA for one queue only. Enable the actual multi-queue path and configuration now. This requires also per-queue NAPI struct. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 35 +++++++++++++++++----- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 14 +-------- 3 files changed, 29 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 7bc02e0cdd93..2f959162d045 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -136,6 +136,7 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; + struct napi_struct napi; struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; }; @@ -345,7 +346,6 @@ struct iwl_trans_pcie { struct iwl_drv *drv; struct net_device napi_dev; - struct napi_struct napi; struct __percpu iwl_tso_hdr_page *tso_hdr_page; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a385f3cddb5d..51314e56209d 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -2,6 +2,7 @@ * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
@@ -730,7 +731,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE); } -static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) +static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 rb_size, enabled = 0; @@ -759,13 +760,13 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) for (i = 0; i < trans->num_rx_queues; i++) { /* Tell device where to find RBD free table in DRAM */ iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i), - (u64)(rxq->bd_dma)); + (u64)(trans_pcie->rxq[i].bd_dma)); /* Tell device where to find RBD used table in DRAM */ iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i), - (u64)(rxq->used_bd_dma)); + (u64)(trans_pcie->rxq[i].used_bd_dma)); /* Tell device where in DRAM to update its Rx status */ iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i), - rxq->rb_stts_dma); + trans_pcie->rxq[i].rb_stts_dma); /* Reset device indice tables */ iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0); iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0); @@ -808,6 +809,12 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) rxq->used_count = 0; } +static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +{ + WARN_ON(1); + return 0; +} + int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -857,6 +864,10 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) iwl_pcie_rx_init_rxb_lists(rxq); + if (!rxq->napi.poll) + netif_napi_add(&trans_pcie->napi_dev, &rxq->napi, + iwl_pcie_dummy_napi_poll, 64); + spin_unlock(&rxq->lock); } @@ -878,7 +889,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq); if (trans->cfg->mq_rx_supported) { - iwl_pcie_rx_mq_hw_init(trans, def_rxq); + iwl_pcie_rx_mq_hw_init(trans); } else { iwl_pcie_rxq_restock(trans, def_rxq); iwl_pcie_rx_hw_init(trans, def_rxq); @@ -940,6 +951,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->used_bd, rxq->used_bd_dma); rxq->used_bd_dma = 0; rxq->used_bd = NULL; + + if (rxq->napi.poll) + netif_napi_del(&rxq->napi); } kfree(trans_pcie->rxq); } @@ -1055,7 +1069,12 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, index = SEQ_TO_INDEX(sequence); cmd_index = get_cmd_index(&txq->q, index); - iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb); + if (rxq->id == 0) + iwl_op_mode_rx(trans->op_mode, &rxq->napi, + &rxcb); + else + iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi, + &rxcb, rxq->id); if (reclaim) { kzfree(txq->entries[cmd_index].free_buf); @@ -1236,8 +1255,8 @@ restart: if (unlikely(emergency && count)) iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); - if (trans_pcie->napi.poll) - napi_gro_flush(&trans_pcie->napi, false); + if (rxq->napi.poll) + napi_gro_flush(&rxq->napi, false); } /* diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index abe091698471..b796952da644 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1428,12 +1428,6 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } -static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) -{ - WARN_ON(1); - return 0; -} - static void iwl_trans_pcie_configure(struct iwl_trans 
*trans, const struct iwl_trans_config *trans_cfg) { @@ -1470,11 +1464,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, * As this function may be called again in some corner cases don't * do anything if NAPI was already initialized. */ - if (!trans_pcie->napi.poll) { + if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY) init_dummy_netdev(&trans_pcie->napi_dev); - netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi, - iwl_pcie_dummy_napi_poll, 64); - } } void iwl_trans_pcie_free(struct iwl_trans *trans) @@ -1498,9 +1489,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); - if (trans_pcie->napi.poll) - netif_napi_del(&trans_pcie->napi); - iwl_pcie_free_fw_monitor(trans); for_each_possible_cpu(i) { -- cgit v1.2.3 From d56a7801b544d63b0d32bc8bca0c12a259b1d8e9 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 26 Jan 2016 12:35:13 +0200 Subject: iwlwifi: pcie: update iwl_mpdu_desc fields Final API of iwl_mpdu_desc has a change in the order of the fields and does not include energy from the third antenna (which is perfectly fine, since we don't have one). Update the structure accordingly. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 6 +++--- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 11 ++++------- 2 files changed, 7 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index b45c61e1f45c..df939f51d9b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -347,11 +347,11 @@ struct iwl_rx_mpdu_desc { /* DW8 */ __le32 filter_match; /* DW9 */ - __le32 gp2_on_air_rise; - /* DW10 */ __le32 rate_n_flags; + /* DW10 */ + u8 energy_a, energy_b, channel, reserved; /* DW11 */ - u8 energy_a, energy_b, energy_c, channel; + __le32 gp2_on_air_rise; /* DW12 & DW13 */ __le64 tsf_on_air_rise; } __packed; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 4cce37238a08..615dea143d4e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -201,25 +201,22 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, struct iwl_rx_mpdu_desc *desc, struct ieee80211_rx_status *rx_status) { - int energy_a, energy_b, energy_c, max_energy; + int energy_a, energy_b, max_energy; energy_a = desc->energy_a; energy_a = energy_a ? -energy_a : S8_MIN; energy_b = desc->energy_b; energy_b = energy_b ? -energy_b : S8_MIN; - energy_c = desc->energy_c; - energy_c = energy_c ? 
-energy_c : S8_MIN; max_energy = max(energy_a, energy_b); - max_energy = max(max_energy, energy_c); - IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n", - energy_a, energy_b, energy_c, max_energy); + IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n", + energy_a, energy_b, max_energy); rx_status->signal = max_energy; rx_status->chains = 0; /* TODO: phy info */ rx_status->chain_signal[0] = energy_a; rx_status->chain_signal[1] = energy_b; - rx_status->chain_signal[2] = energy_c; + rx_status->chain_signal[2] = S8_MIN; } static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, -- cgit v1.2.3 From c89e333d5e4a27f736c960b29040b261c21563a5 Mon Sep 17 00:00:00 2001 From: Andrei Otcheretianski Date: Tue, 26 Jan 2016 18:12:28 +0200 Subject: iwlwifi: mvm: allow to disable beacon filtering for AP/GO interface When in AP mode we need to filter in beacons from other APs to update HT operation mode. As a power optimization the beacons are filtered out when there are no associated stations. As a result, when there are no associated stations, we will not update the HT operation mode until a station connects. Add a debugfs parameter that allows to disable this optimization. Signed-off-by: Andrei Otcheretianski Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 3 +++ drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + 4 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 005cc09757d2..c529e5355803 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1548,6 +1548,9 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) mvm->debugfs_dir, &mvm->scan_iter_notif_enabled)) goto err; + if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR, + mvm->debugfs_dir, &mvm->drop_bcn_ap_mode)) + goto err; #ifdef CONFIG_IWLWIFI_BCAST_FILTERING if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 62927f567afc..535134d639e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -744,7 +744,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm, * wake-ups. */ cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); - if (mvmvif->ap_assoc_sta_count) { + if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) { cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON); IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n"); } else { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 747f7eb80f47..ebe37bb0ce4c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -859,6 +859,12 @@ struct iwl_mvm { u32 ciphers[6]; struct iwl_mvm_tof_data tof_data; + + /* + * Drop beacons from other APs in AP mode when there are no connected + * clients. 
+ */ + bool drop_bcn_ap_mode; }; /* Extract MVM priv from op_mode and _hw */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 325ff8aa33f5..09a94a5efb61 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -492,6 +492,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } mvm->sf_state = SF_UNINIT; mvm->cur_ucode = IWL_UCODE_INIT; + mvm->drop_bcn_ap_mode = true; mutex_init(&mvm->mutex); mutex_init(&mvm->d0i3_suspend_mutex); -- cgit v1.2.3 From 875e94392ad2be9776c8325d3573160eb1455a2b Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Thu, 28 Jan 2016 19:24:02 +0200 Subject: wil6210: prevent access to vring_tx_data lock during its init wil_tx_vring locks the vring_tx_data lock before accessing the TX vring to check if it is enabled and valid for use. In case of quick disconnect / connect events for the same station, spin_lock(&txdata->lock) can be called during the lock initialization in the vring init function. To prevent such a race, the TX vrings spin lock should be initialized once during wil6210 driver initialization. Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/main.c | 3 +++ drivers/net/wireless/ath/wil6210/txrx.c | 26 +++++++++++++++++++++----- 2 files changed, 24 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 0652efed6b5d..712ebbfbdeb8 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -438,6 +438,9 @@ int wil_priv_init(struct wil6210_priv *wil) for (i = 0; i < WIL6210_MAX_CID; i++) spin_lock_init(&wil->sta[i].tid_rx_lock); + for (i = 0; i < WIL6210_MAX_TX_RINGS; i++) + spin_lock_init(&wil->vring_tx_data[i].lock); + mutex_init(&wil->mutex); mutex_init(&wil->wmi_mutex); mutex_init(&wil->back_rx_mutex); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index 9680b970b863..6af20903cf89 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -717,6 +717,21 @@ void wil_rx_fini(struct wil6210_priv *wil) wil_vring_free(wil, vring, 0); } +static inline void wil_tx_data_init(struct vring_tx_data *txdata) +{ + spin_lock_bh(&txdata->lock); + txdata->dot1x_open = 0; + txdata->enabled = 0; + txdata->idle = 0; + txdata->last_idle = 0; + txdata->begin = 0; + txdata->agg_wsize = 0; + txdata->agg_timeout = 0; + txdata->agg_amsdu = 0; + txdata->addba_in_progress = false; + spin_unlock_bh(&txdata->lock); +} + int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, int cid, int tid) { @@ -758,8 +773,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, goto out; } - memset(txdata, 0, sizeof(*txdata)); - spin_lock_init(&txdata->lock); + wil_tx_data_init(txdata); vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -791,8 +805,10 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size, return 0; out_free: + spin_lock_bh(&txdata->lock); txdata->dot1x_open = false; txdata->enabled = 0; + spin_unlock_bh(&txdata->lock); wil_vring_free(wil, vring, 1); wil->vring2cid_tid[id][0] = WIL6210_MAX_CID; wil->vring2cid_tid[id][1] = 0; @@ -834,8 +850,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) goto out; } - memset(txdata, 0, sizeof(*txdata)); - spin_lock_init(&txdata->lock); + wil_tx_data_init(txdata); 
vring->size = size; rc = wil_vring_alloc(wil, vring); if (rc) @@ -865,8 +880,10 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size) return 0; out_free: + spin_lock_bh(&txdata->lock); txdata->enabled = 0; txdata->dot1x_open = false; + spin_unlock_bh(&txdata->lock); wil_vring_free(wil, vring, 1); out: @@ -894,7 +911,6 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id) napi_synchronize(&wil->napi_tx); wil_vring_free(wil, vring, 1); - memset(txdata, 0, sizeof(*txdata)); } static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil, -- cgit v1.2.3 From 78771d76f826202012201d700028a2d866d03fb3 Mon Sep 17 00:00:00 2001 From: Vladimir Kondratiev Date: Thu, 28 Jan 2016 19:24:03 +0200 Subject: wil6210: wait for disconnect completion cfg80211_ops.disconnect() should wait for disconnect flow to complete. If it does not, internal state becomes out of sync with one in cfg80211. If one does stress test connect/disconnect sequence, cfg80211 will issue next connect before disconnect completed internally. Signed-off-by: Vladimir Kondratiev Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/cfg80211.c | 13 ++++++++++++- drivers/net/wireless/ath/wil6210/main.c | 28 +++++++++------------------- drivers/net/wireless/ath/wil6210/wil6210.h | 1 + 3 files changed, 22 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 774352f9e256..97ad91eef4a6 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -535,7 +535,18 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy, wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code); - rc = wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); + if (!(test_bit(wil_status_fwconnecting, wil->status) || + test_bit(wil_status_fwconnected, wil->status))) { + wil_err(wil, "%s: Disconnect was called while disconnected\n", + __func__); + return 0; + } + + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + WMI_DISCONNECT_EVENTID, NULL, 0, + WIL6210_DISCONNECT_TO_MS); + if (rc) + wil_err(wil, "%s: disconnect error %d\n", __func__, rc); return rc; } diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 712ebbfbdeb8..78ba6e04c944 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -23,9 +23,6 @@ #include "wmi.h" #include "boot_loader.h" -#define WAIT_FOR_DISCONNECT_TIMEOUT_MS 2000 -#define WAIT_FOR_DISCONNECT_INTERVAL_MS 10 - bool debug_fw; /* = false; */ module_param(debug_fw, bool, S_IRUGO); MODULE_PARM_DESC(debug_fw, " do not perform card reset. 
For FW debug"); @@ -942,8 +939,7 @@ int wil_up(struct wil6210_priv *wil) int __wil_down(struct wil6210_priv *wil) { - int iter = WAIT_FOR_DISCONNECT_TIMEOUT_MS / - WAIT_FOR_DISCONNECT_INTERVAL_MS; + int rc; WARN_ON(!mutex_is_locked(&wil->mutex)); @@ -967,22 +963,16 @@ int __wil_down(struct wil6210_priv *wil) } if (test_bit(wil_status_fwconnected, wil->status) || - test_bit(wil_status_fwconnecting, wil->status)) - wmi_send(wil, WMI_DISCONNECT_CMDID, NULL, 0); + test_bit(wil_status_fwconnecting, wil->status)) { - /* make sure wil is idle (not connected) */ - mutex_unlock(&wil->mutex); - while (iter--) { - int idle = !test_bit(wil_status_fwconnected, wil->status) && - !test_bit(wil_status_fwconnecting, wil->status); - if (idle) - break; - msleep(WAIT_FOR_DISCONNECT_INTERVAL_MS); + mutex_unlock(&wil->mutex); + rc = wmi_call(wil, WMI_DISCONNECT_CMDID, NULL, 0, + WMI_DISCONNECT_EVENTID, NULL, 0, + WIL6210_DISCONNECT_TO_MS); + mutex_lock(&wil->mutex); + if (rc) + wil_err(wil, "timeout waiting for disconnect\n"); } - mutex_lock(&wil->mutex); - - if (iter < 0) - wil_err(wil, "timeout waiting for idle FW/HW\n"); wil_reset(wil, false); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 1b8fa1d2bae9..950296507dd1 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -92,6 +92,7 @@ static inline u32 wil_mtu2macbuf(u32 mtu) #define WIL6210_FW_RECOVERY_RETRIES (5) /* try to recover this many times */ #define WIL6210_FW_RECOVERY_TO msecs_to_jiffies(5000) #define WIL6210_SCAN_TO msecs_to_jiffies(10000) +#define WIL6210_DISCONNECT_TO_MS (2000) #define WIL6210_RX_HIGH_TRSH_INIT (0) #define WIL6210_RX_HIGH_TRSH_DEFAULT \ (1 << (WIL_RX_RING_SIZE_ORDER_DEFAULT - 3)) -- cgit v1.2.3 From fe5c271e2878fb080f1b32fec5b4e3f7a6070152 Mon Sep 17 00:00:00 2001 From: Maya Erez Date: Thu, 28 Jan 2016 19:24:04 +0200 Subject: wil6210: protect synchronous wmi commands handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In case there are multiple WMI commands with the same reply_id, the following scenario can occur: - Driver sends the first command to the device - The reply didn’t get on time and there is timeout - Reply_id, reply_buf and reply_size are set to 0 - Driver sends second wmi command with the same reply_id as the first - Driver sets wil->reply_id - Reply for the first wmi command arrives and handled by wmi_recv_cmd - As its ID fits the reply_id but the reply_buf is not set yet it is handled as a reply with event handler, and WARN_ON is printed This patch guarantee atomic setting of all the reply variables and prevents the above scenario. 
Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/wmi.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index e1a6cb8840ed..493e721c4fa7 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -838,6 +838,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi; u16 id = le16_to_cpu(wmi->id); u32 tstamp = le32_to_cpu(wmi->timestamp); + spin_lock_irqsave(&wil->wmi_ev_lock, flags); if (wil->reply_id && wil->reply_id == id) { if (wil->reply_buf) { memcpy(wil->reply_buf, wmi, @@ -845,6 +846,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil) immed_reply = true; } } + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n", id, wmi->mid, tstamp); @@ -888,13 +890,16 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, mutex_lock(&wil->wmi_mutex); + spin_lock(&wil->wmi_ev_lock); + wil->reply_id = reply_id; + wil->reply_buf = reply; + wil->reply_size = reply_size; + spin_unlock(&wil->wmi_ev_lock); + rc = __wmi_send(wil, cmdid, buf, len); if (rc) goto out; - wil->reply_id = reply_id; - wil->reply_buf = reply; - wil->reply_size = reply_size; remain = wait_for_completion_timeout(&wil->wmi_call, msecs_to_jiffies(to_msec)); if (0 == remain) { @@ -907,10 +912,14 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, cmdid, reply_id, to_msec - jiffies_to_msecs(remain)); } + +out: + spin_lock(&wil->wmi_ev_lock); wil->reply_id = 0; wil->reply_buf = NULL; wil->reply_size = 0; - out: + spin_unlock(&wil->wmi_ev_lock); + mutex_unlock(&wil->wmi_mutex); return rc; -- cgit v1.2.3 From ee5dfe0d240e21afe63bdd77bf1d4a386203aa6e Mon Sep 17 00:00:00 2001 From: Hamad Kadmany Date: Thu, 28 Jan 2016 19:24:05 +0200 Subject: wil6210: TX vring optimization Tx vring needs to be enlarged to get better performance for traffic over 2Gbps. Signed-off-by: Hamad Kadmany Signed-off-by: Maya Erez Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/wil6210/debugfs.c | 6 +++--- drivers/net/wireless/ath/wil6210/wil6210.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index 97bc186f9728..98c9148a3450 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012-2015 Qualcomm Atheros, Inc. + * Copyright (c) 2012-2016 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -68,13 +68,13 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil, seq_puts(s, "???\n"); } - if (vring->va && (vring->size < 1025)) { + if (vring->va && (vring->size <= (1 << WIL_RING_SIZE_ORDER_MAX))) { uint i; for (i = 0; i < vring->size; i++) { volatile struct vring_tx_desc *d = &vring->va[i].tx; - if ((i % 64) == 0 && (i != 0)) + if ((i % 128) == 0 && (i != 0)) seq_puts(s, "\n"); seq_printf(s, "%c", (d->dma.status & BIT(0)) ? _s : (vring->ctx[i].skb ? 
_h : 'h')); diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 950296507dd1..8427d68b6fa8 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -51,7 +51,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL_TX_Q_LEN_DEFAULT (4000) #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) -#define WIL_TX_RING_SIZE_ORDER_DEFAULT (10) +#define WIL_TX_RING_SIZE_ORDER_DEFAULT (12) #define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7) #define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */ /* limit ring size in range [32..32k] */ -- cgit v1.2.3 From 0b3d76e9f8b4a2b67358b454843ede30555f93a5 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Thu, 28 Jan 2016 13:54:07 -0800 Subject: ath10k: rename Mesh related service names WMI_10_4_SERVICE_MESH bit is for non IEEE802.11s Mesh. Hence rename it to WMI_10_4_SERVICE_MESH_NON_11S. Also add _11S as post-fix to each of WMI_SERVICE_MESH and WMI_VDEV_SUBTYPE_MESH specifying the service is for 11s Mesh. This will help users to distinguish 11s Mesh from non 11s Mesh. Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 4 ++-- drivers/net/wireless/ath/ath10k/wmi.h | 16 +++++++++------- 2 files changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index ef0438d2cc8f..1406b0f1d883 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4380,8 +4380,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, arvif->vdev_type = WMI_VDEV_TYPE_IBSS; break; case NL80211_IFTYPE_MESH_POINT: - if (test_bit(WMI_SERVICE_MESH, ar->wmi.svc_map)) { - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH; + if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { + arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 806d4bb6e7d6..0458bf6bcdfc 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -176,7 +176,8 @@ enum wmi_service { WMI_SERVICE_AUX_CHAN_LOAD_INTF, WMI_SERVICE_BSS_CHANNEL_INFO_64, WMI_SERVICE_EXT_RES_CFG_SUPPORT, - WMI_SERVICE_MESH, + WMI_SERVICE_MESH_11S, + WMI_SERVICE_MESH_NON_11S, WMI_SERVICE_PEER_STATS, /* keep last */ @@ -296,7 +297,7 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF, WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, - WMI_10_4_SERVICE_MESH, + WMI_10_4_SERVICE_MESH_NON_11S, }; static inline char *wmi_service_name(int service_id) @@ -387,7 +388,8 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF); SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64); SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT); - SVCSTR(WMI_SERVICE_MESH); + SVCSTR(WMI_SERVICE_MESH_11S); + SVCSTR(WMI_SERVICE_MESH_NON_11S); SVCSTR(WMI_SERVICE_PEER_STATS); default: return NULL; @@ -463,7 +465,7 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64, WMI_SERVICE_BSS_CHANNEL_INFO_64, len); SVCMAP(WMI_10X_SERVICE_MESH, - WMI_SERVICE_MESH, len); + WMI_SERVICE_MESH_11S, len); SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT, WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); 
SVCMAP(WMI_10X_SERVICE_PEER_STATS, @@ -628,8 +630,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_BSS_CHANNEL_INFO_64, len); SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); - SVCMAP(WMI_10_4_SERVICE_MESH, - WMI_SERVICE_MESH, len); + SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S, + WMI_SERVICE_MESH_NON_11S, len); } #undef SVCMAP @@ -4287,7 +4289,7 @@ enum wmi_vdev_subtype { WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, WMI_VDEV_SUBTYPE_P2P_GO = 3, WMI_VDEV_SUBTYPE_PROXY_STA = 4, - WMI_VDEV_SUBTYPE_MESH = 5, + WMI_VDEV_SUBTYPE_MESH_11S = 5, }; /* values for vdev_subtype */ -- cgit v1.2.3 From e70e9ba9df8609908c34d042cf980c1a59c56900 Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Thu, 28 Jan 2016 13:54:08 -0800 Subject: ath10k: update 10.4 WMI service map Update 10.4 WMI service map to sync to the latest 10.4 firmware as of 1/20/2016. Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/wmi.h | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 0458bf6bcdfc..031018a07231 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -179,6 +179,7 @@ enum wmi_service { WMI_SERVICE_MESH_11S, WMI_SERVICE_MESH_NON_11S, WMI_SERVICE_PEER_STATS, + WMI_SERVICE_RESTRT_CHNL_SUPPORT, /* keep last */ WMI_SERVICE_MAX, @@ -298,6 +299,9 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64, WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT, WMI_10_4_SERVICE_MESH_NON_11S, + WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_10_4_SERVICE_PEER_STATS, + WMI_10_4_SERVICE_MESH_11S, }; static inline char *wmi_service_name(int service_id) @@ -391,6 +395,7 @@ static inline char *wmi_service_name(int service_id) SVCSTR(WMI_SERVICE_MESH_11S); SVCSTR(WMI_SERVICE_MESH_NON_11S); SVCSTR(WMI_SERVICE_PEER_STATS); + SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT); default: return NULL; } @@ -632,6 +637,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_EXT_RES_CFG_SUPPORT, len); SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S, WMI_SERVICE_MESH_NON_11S, len); + SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT, + WMI_SERVICE_RESTRT_CHNL_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_PEER_STATS, + WMI_SERVICE_PEER_STATS, len); + SVCMAP(WMI_10_4_SERVICE_MESH_11S, + WMI_SERVICE_MESH_11S, len); } #undef SVCMAP -- cgit v1.2.3 From 08c27be1db84dcdad14e86ec76d9c9915deb14eb Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Thu, 28 Jan 2016 13:54:09 -0800 Subject: ath10k: use vif->type and vif->p2p for P2P_GO check Interface type P2P_GO can be checked by either arvif->vdev_type and arvif->vdev_subtype or vif->type and vif->p2p. Use later one to avoid more cpu consumption that could happen when subtype abstraction layer change is introduced. 
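For reference, the two equivalent checks compare as follows. This is an illustrative C sketch using the mac80211 and ath10k fields already referenced in this series; the helper names are made up for illustration and are not part of the patch:

    /* Old style: consult the derived WMI vdev type/subtype kept in the
     * driver's private vif state.
     */
    static bool ath10k_vif_is_p2p_go_old(struct ath10k_vif *arvif)
    {
            return arvif->vdev_type == WMI_VDEV_TYPE_AP &&
                   arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO;
    }

    /* New style: mac80211 already tracks the interface type and the P2P
     * flag, so no driver-private state needs to be read.
     */
    static bool ath10k_vif_is_p2p_go_new(struct ieee80211_vif *vif)
    {
            return vif->type == NL80211_IFTYPE_AP && vif->p2p;
    }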
Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 8 ++------ drivers/net/wireless/ath/ath10k/wmi.c | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 1406b0f1d883..addf3cb3aeb1 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -1358,10 +1358,7 @@ static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, const u8 *p2p_ie; int ret; - if (arvif->vdev_type != WMI_VDEV_TYPE_AP) - return 0; - - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) return 0; mgmt = (void *)bcn->data; @@ -3259,8 +3256,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); /* This is case only for P2P_GO */ - if (arvif->vdev_type != WMI_VDEV_TYPE_AP || - arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) return; if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 0f01a8d99604..5158716d6dff 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -3196,7 +3196,7 @@ static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif, struct sk_buff *bcn, const struct wmi_p2p_noa_info *noa) { - if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) + if (!arvif->vif->p2p) return; ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed); -- cgit v1.2.3 From 6e4de1a49aa3254f46f66db5aac530707c193cde Mon Sep 17 00:00:00 2001 From: Peter Oh Date: Thu, 28 Jan 2016 13:54:10 -0800 Subject: ath10k: add abstraction layer for vdev subtype Abstraction layer for vdev subtype is added to solve subtype mismatch and to give flexible compatibility among different firmware revisions. For instance, 10.2 and 10.4 firmware has different definition of their vdev subtypes for Mesh. 10.4 defined subtype 6 for 802.11s Mesh while 10.2 uses 5. Hence use the abstraction API to get right subtype to use. 
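The expected call pattern looks roughly like this (a sketch only; the error check is hypothetical, the in-tree callers below assign the returned value directly):

    int subtype;

    /* Translate the generic subtype into whatever this firmware
     * revision expects; each WMI ops table installs its own mapping.
     */
    subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_MESH_11S);
    if (subtype < 0)
            return subtype;     /* e.g. -EOPNOTSUPP or -ENOTSUPP */

    arvif->vdev_subtype = subtype;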
Signed-off-by: Peter Oh Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/mac.c | 15 ++++--- drivers/net/wireless/ath/ath10k/wmi-ops.h | 11 +++++ drivers/net/wireless/ath/ath10k/wmi-tlv.c | 1 + drivers/net/wireless/ath/ath10k/wmi.c | 70 +++++++++++++++++++++++++++++++ drivers/net/wireless/ath/ath10k/wmi.h | 42 ++++++++++++++++--- 5 files changed, 128 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index addf3cb3aeb1..96e2f8ad22fc 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -4359,25 +4359,29 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, bit, ar->free_vdev_map); arvif->vdev_id = bit; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; + arvif->vdev_subtype = + ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); switch (vif->type) { case NL80211_IFTYPE_P2P_DEVICE: arvif->vdev_type = WMI_VDEV_TYPE_STA; - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: arvif->vdev_type = WMI_VDEV_TYPE_STA; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); break; case NL80211_IFTYPE_ADHOC: arvif->vdev_type = WMI_VDEV_TYPE_IBSS; break; case NL80211_IFTYPE_MESH_POINT: if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_MESH_11S); } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { ret = -EINVAL; ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); @@ -4389,7 +4393,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, arvif->vdev_type = WMI_VDEV_TYPE_AP; if (vif->p2p) - arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; + arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype + (ar, WMI_VDEV_SUBTYPE_P2P_GO); break; case NL80211_IFTYPE_MONITOR: arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 8f4f6a892581..32ab34edceb5 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -186,6 +186,8 @@ struct wmi_ops { u8 enable, u32 detect_level, u32 detect_margin); + int (*get_vdev_subtype)(struct ath10k *ar, + enum wmi_vdev_subtype subtype); }; int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); @@ -1327,4 +1329,13 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable, ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid); } +static inline int +ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype) +{ + if (!ar->wmi.ops->get_vdev_subtype) + return -EOPNOTSUPP; + + return ar->wmi.ops->get_vdev_subtype(ar, subtype); +} + #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 3b3a27b859f3..108593202052 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3483,6 +3483,7 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update, .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs, .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, + .get_vdev_subtype = 
ath10k_wmi_op_get_vdev_subtype, }; static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 5158716d6dff..04799c73ebc8 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -7413,6 +7413,71 @@ unlock: buf[len] = 0; } +int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_LEGACY_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -ENOTSUPP; + } + return -ENOTSUPP; +} + +static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_10_2_4_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return -ENOTSUPP; + } + return -ENOTSUPP; +} + +static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype) +{ + switch (subtype) { + case WMI_VDEV_SUBTYPE_NONE: + return WMI_VDEV_SUBTYPE_10_4_NONE; + case WMI_VDEV_SUBTYPE_P2P_DEVICE: + return WMI_VDEV_SUBTYPE_10_4_P2P_DEV; + case WMI_VDEV_SUBTYPE_P2P_CLIENT: + return WMI_VDEV_SUBTYPE_10_4_P2P_CLI; + case WMI_VDEV_SUBTYPE_P2P_GO: + return WMI_VDEV_SUBTYPE_10_4_P2P_GO; + case WMI_VDEV_SUBTYPE_PROXY_STA: + return WMI_VDEV_SUBTYPE_10_4_PROXY_STA; + case WMI_VDEV_SUBTYPE_MESH_11S: + return WMI_VDEV_SUBTYPE_10_4_MESH_11S; + case WMI_VDEV_SUBTYPE_MESH_NON_11S: + return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S; + } + return -ENOTSUPP; +} + static const struct wmi_ops wmi_ops = { .rx = ath10k_wmi_op_rx, .map_svc = wmi_main_svc_map, @@ -7472,6 +7537,7 @@ static const struct wmi_ops wmi_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -7539,6 +7605,7 @@ static const struct wmi_ops wmi_10_1_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -7607,6 +7674,7 @@ static const struct wmi_ops wmi_10_2_ops = { .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp, .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, + .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, /* .gen_pdev_enable_adaptive_cca not implemented */ }; @@ -7673,6 +7741,7 @@ static const 
struct wmi_ops wmi_10_2_4_ops = { .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, .gen_pdev_enable_adaptive_cca = ath10k_wmi_op_gen_pdev_enable_adaptive_cca, + .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -7739,6 +7808,7 @@ static const struct wmi_ops wmi_10_4_ops = { /* shared with 10.2 */ .gen_request_stats = ath10k_wmi_op_gen_request_stats, .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature, + .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype, }; int ath10k_wmi_attach(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 031018a07231..049c61820765 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -4295,12 +4295,40 @@ enum wmi_vdev_type { }; enum wmi_vdev_subtype { - WMI_VDEV_SUBTYPE_NONE = 0, - WMI_VDEV_SUBTYPE_P2P_DEVICE = 1, - WMI_VDEV_SUBTYPE_P2P_CLIENT = 2, - WMI_VDEV_SUBTYPE_P2P_GO = 3, - WMI_VDEV_SUBTYPE_PROXY_STA = 4, - WMI_VDEV_SUBTYPE_MESH_11S = 5, + WMI_VDEV_SUBTYPE_NONE, + WMI_VDEV_SUBTYPE_P2P_DEVICE, + WMI_VDEV_SUBTYPE_P2P_CLIENT, + WMI_VDEV_SUBTYPE_P2P_GO, + WMI_VDEV_SUBTYPE_PROXY_STA, + WMI_VDEV_SUBTYPE_MESH_11S, + WMI_VDEV_SUBTYPE_MESH_NON_11S, +}; + +enum wmi_vdev_subtype_legacy { + WMI_VDEV_SUBTYPE_LEGACY_NONE = 0, + WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3, + WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4, +}; + +enum wmi_vdev_subtype_10_2_4 { + WMI_VDEV_SUBTYPE_10_2_4_NONE = 0, + WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3, + WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5, +}; + +enum wmi_vdev_subtype_10_4 { + WMI_VDEV_SUBTYPE_10_4_NONE = 0, + WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1, + WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2, + WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3, + WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4, + WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5, + WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6, }; /* values for vdev_subtype */ @@ -6471,5 +6499,7 @@ size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head); void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats, char *buf); +int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar, + enum wmi_vdev_subtype subtype); #endif /* _WMI_H_ */ -- cgit v1.2.3 From b1a958c933c5b0f967844ec05e76ec2985450782 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Fri, 29 Jan 2016 16:35:11 +0530 Subject: ath10k: add qca4019 hw params Add a new entry in hw_params_list for qca4019 with list of it's own details. 
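For context, the new entry is consumed like the existing ones: at probe time the core walks ath10k_hw_params_list and copies the matching entry into ar->hw_params. Conceptually (a simplified sketch, not the exact in-tree lookup, which also matches on further fields such as the device id):

    for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
            if (ath10k_hw_params_list[i].id == ar->target_version) {
                    ar->hw_params = ath10k_hw_params_list[i];
                    break;
            }
    }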
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 20 ++++++++++++++++++++ drivers/net/wireless/ath/ath10k/hw.h | 8 ++++++++ drivers/net/wireless/ath/ath10k/targaddrs.h | 3 +++ 3 files changed, 31 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 1f4a27881936..79043872e7ac 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -201,6 +201,26 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ, }, }, + { + .id = QCA4019_HW_1_0_DEV_VERSION, + .dev_id = 0, + .name = "qca4019 hw1.0", + .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, + .uart_pin = 7, + .otp_exe_param = 0x0010000, + .continuous_frag_desc = true, + .channel_counters_freq_hz = 125000, + .max_probe_resp_desc_thres = 24, + .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .fw = { + .dir = QCA4019_HW_1_0_FW_DIR, + .fw = QCA4019_HW_1_0_FW_FILE, + .otp = QCA4019_HW_1_0_OTP_FILE, + .board = QCA4019_HW_1_0_BOARD_DATA_FILE, + .board_size = QCA4019_BOARD_DATA_SZ, + .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ, + }, + }, }; static const char *const ath10k_core_fw_feature_str[] = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 4217bdbe9f01..291ca1f798e3 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -106,6 +106,14 @@ enum qca9377_chip_id_rev { #define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin" #define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234 +/* QCA4019 1.0 definitions */ +#define QCA4019_HW_1_0_DEV_VERSION 0x01000000 +#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0" +#define QCA4019_HW_1_0_FW_FILE "firmware.bin" +#define QCA4019_HW_1_0_OTP_FILE "otp.bin" +#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin" +#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234 + #define ATH10K_FW_API2_FILE "firmware-2.bin" #define ATH10K_FW_API3_FILE "firmware-3.bin" diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h index 05a421bc322a..361f143b019c 100644 --- a/drivers/net/wireless/ath/ath10k/targaddrs.h +++ b/drivers/net/wireless/ath/ath10k/targaddrs.h @@ -456,4 +456,7 @@ Fw Mode/SubMode Mask #define QCA99X0_BOARD_DATA_SZ 12288 #define QCA99X0_BOARD_EXT_DATA_SZ 0 +#define QCA4019_BOARD_DATA_SZ 12064 +#define QCA4019_BOARD_EXT_DATA_SZ 0 + #endif /* __TARGADDRS_H__ */ -- cgit v1.2.3 From 5699a6f238ebfc8a6be3963a30d86d6f1e8aaaf9 Mon Sep 17 00:00:00 2001 From: Raja Mani Date: Fri, 29 Jan 2016 16:35:12 +0530 Subject: ath10k: populate qca4019 fw specific wmi init params Some of the parameter like tx/rx chain mask, number of htt tx desc, qcache active peer count, etc goes via wmi init cmd to qca4019 firmware are different. To make use of 10.4 gen_init function for qca4019, change wmi service ready handler and 10.4 wmi init functions to adapt qca4019 specific init values. 
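The net effect is that the 10.4 init command is built from per-chip values instead of QCA99X0-only constants. Condensed from the hunks below:

    /* core.c: per-chip knobs now come from the hw_params entry ... */
    ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc;
    ar->max_spatial_stream = ar->hw_params.max_spatial_stream;

    /* ... wmi.c: and the 10.4 WMI init command picks them up from there */
    config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
    config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
    config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);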
Signed-off-by: Raja Mani Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 14 ++++++++++++-- drivers/net/wireless/ath/ath10k/core.h | 6 ++++++ drivers/net/wireless/ath/ath10k/hw.h | 6 ------ drivers/net/wireless/ath/ath10k/wmi.c | 12 ++++++------ drivers/net/wireless/ath/ath10k/wmi.h | 1 - 5 files changed, 24 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 79043872e7ac..4d3176492ae7 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -156,6 +156,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 150000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .num_msdu_desc = 1424, + .qcache_active_peers = 50, + .tx_chain_mask = 0xf, + .rx_chain_mask = 0xf, + .max_spatial_stream = 4, .fw = { .dir = QCA99X0_HW_2_0_FW_DIR, .fw = QCA99X0_HW_2_0_FW_FILE, @@ -212,6 +217,11 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .channel_counters_freq_hz = 125000, .max_probe_resp_desc_thres = 24, .hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE, + .num_msdu_desc = 2500, + .qcache_active_peers = 35, + .tx_chain_mask = 0x3, + .rx_chain_mask = 0x3, + .max_spatial_stream = 2, .fw = { .dir = QCA4019_HW_1_0_FW_DIR, .fw = QCA4019_HW_1_0_FW_FILE, @@ -1523,9 +1533,9 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS; ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS; ar->num_tids = TARGET_10_4_TGT_NUM_TIDS; - ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC; + ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc; ar->fw_stats_req_mask = WMI_STAT_PEER; - ar->max_spatial_stream = WMI_10_4_MAX_SPATIAL_STREAM; + ar->max_spatial_stream = ar->hw_params.max_spatial_stream; break; case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_MAX: diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 2e411b5258c2..a62b62a62266 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -680,6 +680,12 @@ struct ath10k { /* The padding bytes's location is different on various chips */ enum ath10k_hw_4addr_pad hw_4addr_pad; + u32 num_msdu_desc; + u32 qcache_active_peers; + u32 tx_chain_mask; + u32 rx_chain_mask; + u32 max_spatial_stream; + struct ath10k_hw_params_fw { const char *dir; const char *fw; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 291ca1f798e3..2dece8db83f8 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -426,16 +426,11 @@ enum ath10k_hw_4addr_pad { #define TARGET_10_4_ACTIVE_PEERS 0 #define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512 -#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50 #define TARGET_10_4_NUM_OFFLOAD_PEERS 0 #define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0 #define TARGET_10_4_NUM_PEER_KEYS 2 #define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2) #define TARGET_10_4_AST_SKID_LIMIT 32 -#define TARGET_10_4_TX_CHAIN_MASK (BIT(0) | BIT(1) | \ - BIT(2) | BIT(3)) -#define TARGET_10_4_RX_CHAIN_MASK (BIT(0) | BIT(1) | \ - BIT(2) | BIT(3)) /* 100 ms for video, best-effort, and background */ #define TARGET_10_4_RX_TIMEOUT_LO_PRI 100 @@ -461,7 +456,6 @@ enum ath10k_hw_4addr_pad { #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1 #define TARGET_10_4_VOW_CONFIG 0 #define 
TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3 -#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400) #define TARGET_10_4_11AC_TX_MAX_FRAGS 2 #define TARGET_10_4_MAX_PEER_EXT_STATS 16 #define TARGET_10_4_SMART_ANT_CAP 0 diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 04799c73ebc8..1ce67423224a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -4618,9 +4618,9 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work) if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) { ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX + - TARGET_10_4_NUM_VDEVS; - ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS + - TARGET_10_4_NUM_VDEVS; + ar->max_num_vdevs; + ar->num_active_peers = ar->hw_params.qcache_active_peers + + ar->max_num_vdevs; ar->num_tids = ar->num_active_peers * 2; ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX; } @@ -5602,8 +5602,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS); config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS); config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT); - config.tx_chain_mask = __cpu_to_le32(TARGET_10_4_TX_CHAIN_MASK); - config.rx_chain_mask = __cpu_to_le32(TARGET_10_4_RX_CHAIN_MASK); + config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask); + config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask); config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI); @@ -5634,7 +5634,7 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar) config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG); config.gtk_offload_max_vdev = __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV); - config.num_msdu_desc = __cpu_to_le32(TARGET_10_4_NUM_MSDU_DESC); + config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx); config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS); config.max_peer_ext_stats = __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 049c61820765..4d3cbc44fcd2 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -1818,7 +1818,6 @@ enum wmi_channel_change_cause { #define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13) #define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */ -#define WMI_10_4_MAX_SPATIAL_STREAM 4 /* HT Capabilities*/ #define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */ -- cgit v1.2.3 From 04d5a21d62887b9bc5383fc68ab2756287da4532 Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Wed, 9 Dec 2015 15:50:23 -0800 Subject: i40e: Add mac_filter_element at the end of the list instead of HEAD Add MAC filter element to the end of the list in the given order, just to be tidy, and just in case there are ever any ordering issues in the future. 
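This is the usual list_add() versus list_add_tail() distinction from <linux/list.h>. A minimal illustration (filter_a, filter_b and f are hypothetical struct i40e_mac_filter pointers, not driver code):

    LIST_HEAD(filters);

    list_add_tail(&filter_a->list, &filters);   /* filters: A    */
    list_add_tail(&filter_b->list, &filters);   /* filters: A, B */

    /* Iteration now sees filters in the order they were added;
     * with list_add() it would see B first, then A.
     */
    list_for_each_entry(f, &filters, list)
            pr_info("filter %pM\n", f->macaddr);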
Change-ID: Idc15276147593ea9393ac72c861f9c7905a791b4 Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8f3b53e0dc46..d078a6331365 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1368,7 +1368,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi, f->changed = true; INIT_LIST_HEAD(&f->list); - list_add(&f->list, &vsi->mac_filter_list); + list_add_tail(&f->list, &vsi->mac_filter_list); } /* increment counter and add a new flag if needed */ -- cgit v1.2.3 From 6e35c04cf633e55648acb9ccabff42aa37bd4044 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 9 Dec 2015 15:50:24 -0800 Subject: i40e/i40evf: Fix RSS rx-flow-hash configuration through ethtool This patch fixes the Hash PCTYPE enable for X722 since it supports a broader selection of PCTYPES for TCP and UDP. This patch also fixes a bug in XL710, X710, X722 support for RSS, as of now we cannot reduce the (4)tuple for RSS for TCP/IPv4/IPV6 or UDP/IPv4/IPv6 packets since this requires a product feature change that comes in a later release. A VF should never be allowed to change the tuples for RSS for any PCTYPE since that's a global setting for the device in case of i40e devices. Change-ID: I0ee7203c9b24813260f58f3220798bc9d9ac4a12 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 14 +++----- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 40 +++++----------------- 2 files changed, 12 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 29d5833e24a3..c8b9dcae630a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2166,8 +2166,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; @@ -2178,8 +2177,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case TCP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; @@ -2190,9 +2188,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V4_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; + return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); @@ -2204,9 +2200,7 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case UDP_V6_FLOW: switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; + return 
-EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index a4c9feb589e7..8906785446ab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -477,54 +477,30 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, switch (nfc->flow_type) { case TCP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - break; - default: + else return -EINVAL; - } break; case TCP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - break; - default: + else return -EINVAL; - } break; case UDP_V4_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); - break; - default: + } else { return -EINVAL; } break; case UDP_V6_FLOW: - switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - case 0: - hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); - break; - default: + } else { return -EINVAL; } break; -- cgit v1.2.3 From 48b1804ee3cdad7bf115666eb35edf12a734710f Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Wed, 9 Dec 2015 15:50:25 -0800 Subject: i40e: Replace X722 mac check in ethtool get_settings 100M SGMII is only supported on X722. Replace the mac check with a feature flag check that is only set for the X722 device. 
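The hunks below only add the flag and switch the get_settings check over to it; the flag itself would be set once per device during software init, along these lines (hypothetical placement, that hunk is not part of this excerpt):

    /* i40e_sw_init(), illustrative only */
    if (pf->hw.mac.type == I40E_MAC_X722)
            pf->flags |= I40E_FLAG_100M_SGMII_CAPABLE;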
Change-ID: I53452d9af6af8cd9dca8500215fbc6ce93418f52 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 68f2204ec6f3..47f6c0a9c9fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -339,6 +339,7 @@ struct i40e_pf { #define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40) #define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) +#define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) #define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index c8b9dcae630a..252a9dd941d4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -340,7 +340,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, SUPPORTED_1000baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (pf->hw.mac.type == I40E_MAC_X722) { + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { ecmd->supported |= SUPPORTED_100baseT_Full; if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) -- cgit v1.2.3 From 209dc4daf23f92b3e0bc6d602411506c4083e421 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 9 Dec 2015 15:50:27 -0800 Subject: i40evf: allow channel bonding of VFs In some modes, bonding would not enslave VF interfaces. This is due to bonding calling change_mtu and the immediately calling open. Because of the asynchronous nature of the admin queue mechanism, the VF returns -EBUSY to the open call, because it knows the previous operation hasn't finished yet. This causes bonding to fail with a less-than-useful error message. To fix this, remove the check for pending operations at the beginning of open. But this introduces a new bug where the driver will panic on a quick close/open cycle. To fix that, we add a new driver state, __I40EVF_DOWN_PENDING, that the driver enters when down is called. The driver finally transitions to a fully DOWN state when it receives confirmation from the PF driver that all the queues are disabled. This allows open to complete even if there is a pending mtu change, and bonding is finally happy. 
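In short, close() no longer claims the fully-DOWN state itself; it parks the adapter in the new intermediate state and the virtchnl completion finishes the transition. Condensed from the hunks below:

    /* i40evf_close(): request teardown, then wait for confirmation */
    adapter->state = __I40EVF_DOWN_PENDING;

    /* i40evf_virtchnl_completion(), I40E_VIRTCHNL_OP_DISABLE_QUEUES:
     * the PF confirmed the queues are disabled, now DOWN is safe.
     */
    if (adapter->state == __I40EVF_DOWN_PENDING)
            adapter->state = __I40EVF_DOWN;

    /* i40evf_open(): only a fully DOWN adapter may be opened */
    if (adapter->state != __I40EVF_DOWN)
            return -EBUSY;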
Change-ID: I06f4c7e435d5bacbfceaa7c3f209e0ff04be21cc Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 1 + drivers/net/ethernet/intel/i40evf/i40evf_main.c | 9 +++++---- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 2 ++ 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index be1b72b93888..9e15f68d9ddd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -173,6 +173,7 @@ enum i40evf_state_t { __I40EVF_RESETTING, /* in reset */ /* Below here, watchdog is running */ __I40EVF_DOWN, /* ready, can be opened */ + __I40EVF_DOWN_PENDING, /* descending, waiting for watchdog */ __I40EVF_TESTING, /* in ethtool self-test */ __I40EVF_RUNNING, /* opened, working */ }; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 94da913b151d..d1c4335114fc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1032,7 +1032,7 @@ void i40evf_down(struct i40evf_adapter *adapter) struct net_device *netdev = adapter->netdev; struct i40evf_mac_filter *f; - if (adapter->state == __I40EVF_DOWN) + if (adapter->state <= __I40EVF_DOWN_PENDING) return; while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, @@ -2142,7 +2142,8 @@ static int i40evf_open(struct net_device *netdev) dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n"); return -EIO; } - if (adapter->state != __I40EVF_DOWN || adapter->aq_required) + + if (adapter->state != __I40EVF_DOWN) return -EBUSY; /* allocate transmit descriptors */ @@ -2197,14 +2198,14 @@ static int i40evf_close(struct net_device *netdev) { struct i40evf_adapter *adapter = netdev_priv(netdev); - if (adapter->state <= __I40EVF_DOWN) + if (adapter->state <= __I40EVF_DOWN_PENDING) return 0; set_bit(__I40E_DOWN, &adapter->vsi.state); i40evf_down(adapter); - adapter->state = __I40EVF_DOWN; + adapter->state = __I40EVF_DOWN_PENDING; i40evf_free_traffic_irqs(adapter); return 0; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index c1c526283757..d3739cc5b608 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -804,6 +804,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, case I40E_VIRTCHNL_OP_DISABLE_QUEUES: i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); + if (adapter->state == __I40EVF_DOWN_PENDING) + adapter->state = __I40EVF_DOWN; break; case I40E_VIRTCHNL_OP_VERSION: case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP: -- cgit v1.2.3 From 406e734aa893fa5841e67de6d4f688ba70a82e4f Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Thu, 10 Dec 2015 11:38:49 -0800 Subject: i40e: define function capabilities in only one place The device capabilities were defined in two places, and neither had all the definitions. It really belongs with the AQ API definition, so this patch removes the other set of definitions and fills out the missing item. 
Change-ID: I273ba7d79a476cd11d2e0ca5825fec1716740de2 Signed-off-by: Shannon Nelson Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 1 + drivers/net/ethernet/intel/i40e/i40e_common.c | 85 +++++++--------------- .../net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 1 + 3 files changed, 30 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index b22012a446a6..256ce6549f61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -422,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 6a034ddac36a..4bdb08bb27d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2765,35 +2765,6 @@ i40e_aq_erase_nvm_exit: return status; } -#define I40E_DEV_FUNC_CAP_SWITCH_MODE 0x01 -#define I40E_DEV_FUNC_CAP_MGMT_MODE 0x02 -#define I40E_DEV_FUNC_CAP_NPAR 0x03 -#define I40E_DEV_FUNC_CAP_OS2BMC 0x04 -#define I40E_DEV_FUNC_CAP_VALID_FUNC 0x05 -#define I40E_DEV_FUNC_CAP_SRIOV_1_1 0x12 -#define I40E_DEV_FUNC_CAP_VF 0x13 -#define I40E_DEV_FUNC_CAP_VMDQ 0x14 -#define I40E_DEV_FUNC_CAP_802_1_QBG 0x15 -#define I40E_DEV_FUNC_CAP_802_1_QBH 0x16 -#define I40E_DEV_FUNC_CAP_VSI 0x17 -#define I40E_DEV_FUNC_CAP_DCB 0x18 -#define I40E_DEV_FUNC_CAP_FCOE 0x21 -#define I40E_DEV_FUNC_CAP_ISCSI 0x22 -#define I40E_DEV_FUNC_CAP_RSS 0x40 -#define I40E_DEV_FUNC_CAP_RX_QUEUES 0x41 -#define I40E_DEV_FUNC_CAP_TX_QUEUES 0x42 -#define I40E_DEV_FUNC_CAP_MSIX 0x43 -#define I40E_DEV_FUNC_CAP_MSIX_VF 0x44 -#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR 0x45 -#define I40E_DEV_FUNC_CAP_IEEE_1588 0x46 -#define I40E_DEV_FUNC_CAP_FLEX10 0xF1 -#define I40E_DEV_FUNC_CAP_CEM 0xF2 -#define I40E_DEV_FUNC_CAP_IWARP 0x51 -#define I40E_DEV_FUNC_CAP_LED 0x61 -#define I40E_DEV_FUNC_CAP_SDP 0x62 -#define I40E_DEV_FUNC_CAP_MDIO 0x63 -#define I40E_DEV_FUNC_CAP_WR_CSR_PROT 0x64 - /** * i40e_parse_discover_capabilities * @hw: pointer to the hw struct @@ -2832,79 +2803,79 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, major_rev = cap->major_rev; switch (id) { - case I40E_DEV_FUNC_CAP_SWITCH_MODE: + case I40E_AQ_CAP_ID_SWITCH_MODE: p->switch_mode = number; break; - case I40E_DEV_FUNC_CAP_MGMT_MODE: + case I40E_AQ_CAP_ID_MNG_MODE: p->management_mode = number; break; - case I40E_DEV_FUNC_CAP_NPAR: + case I40E_AQ_CAP_ID_NPAR_ACTIVE: p->npar_enable = number; break; - case I40E_DEV_FUNC_CAP_OS2BMC: + case I40E_AQ_CAP_ID_OS2BMC_CAP: p->os2bmc = number; break; - case I40E_DEV_FUNC_CAP_VALID_FUNC: + case I40E_AQ_CAP_ID_FUNCTIONS_VALID: p->valid_functions = number; break; - case I40E_DEV_FUNC_CAP_SRIOV_1_1: + case I40E_AQ_CAP_ID_SRIOV: if (number == 1) p->sr_iov_1_1 = true; break; - case I40E_DEV_FUNC_CAP_VF: + case I40E_AQ_CAP_ID_VF: p->num_vfs = number; p->vf_base_id = logical_id; break; - case I40E_DEV_FUNC_CAP_VMDQ: + case I40E_AQ_CAP_ID_VMDQ: if (number == 1) p->vmdq = true; break; - case I40E_DEV_FUNC_CAP_802_1_QBG: + case I40E_AQ_CAP_ID_8021QBG: if (number == 1) p->evb_802_1_qbg = true; 
break; - case I40E_DEV_FUNC_CAP_802_1_QBH: + case I40E_AQ_CAP_ID_8021QBR: if (number == 1) p->evb_802_1_qbh = true; break; - case I40E_DEV_FUNC_CAP_VSI: + case I40E_AQ_CAP_ID_VSI: p->num_vsis = number; break; - case I40E_DEV_FUNC_CAP_DCB: + case I40E_AQ_CAP_ID_DCB: if (number == 1) { p->dcb = true; p->enabled_tcmap = logical_id; p->maxtc = phys_id; } break; - case I40E_DEV_FUNC_CAP_FCOE: + case I40E_AQ_CAP_ID_FCOE: if (number == 1) p->fcoe = true; break; - case I40E_DEV_FUNC_CAP_ISCSI: + case I40E_AQ_CAP_ID_ISCSI: if (number == 1) p->iscsi = true; break; - case I40E_DEV_FUNC_CAP_RSS: + case I40E_AQ_CAP_ID_RSS: p->rss = true; p->rss_table_size = number; p->rss_table_entry_width = logical_id; break; - case I40E_DEV_FUNC_CAP_RX_QUEUES: + case I40E_AQ_CAP_ID_RXQ: p->num_rx_qp = number; p->base_queue = phys_id; break; - case I40E_DEV_FUNC_CAP_TX_QUEUES: + case I40E_AQ_CAP_ID_TXQ: p->num_tx_qp = number; p->base_queue = phys_id; break; - case I40E_DEV_FUNC_CAP_MSIX: + case I40E_AQ_CAP_ID_MSIX: p->num_msix_vectors = number; break; - case I40E_DEV_FUNC_CAP_MSIX_VF: + case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; break; - case I40E_DEV_FUNC_CAP_FLEX10: + case I40E_AQ_CAP_ID_FLEX10: if (major_rev == 1) { if (number == 1) { p->flex10_enable = true; @@ -2920,38 +2891,38 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->flex10_mode = logical_id; p->flex10_status = phys_id; break; - case I40E_DEV_FUNC_CAP_CEM: + case I40E_AQ_CAP_ID_CEM: if (number == 1) p->mgmt_cem = true; break; - case I40E_DEV_FUNC_CAP_IWARP: + case I40E_AQ_CAP_ID_IWARP: if (number == 1) p->iwarp = true; break; - case I40E_DEV_FUNC_CAP_LED: + case I40E_AQ_CAP_ID_LED: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->led[phys_id] = true; break; - case I40E_DEV_FUNC_CAP_SDP: + case I40E_AQ_CAP_ID_SDP: if (phys_id < I40E_HW_CAP_MAX_GPIO) p->sdp[phys_id] = true; break; - case I40E_DEV_FUNC_CAP_MDIO: + case I40E_AQ_CAP_ID_MDIO: if (number == 1) { p->mdio_port_num = phys_id; p->mdio_port_mode = logical_id; } break; - case I40E_DEV_FUNC_CAP_IEEE_1588: + case I40E_AQ_CAP_ID_1588: if (number == 1) p->ieee_1588 = true; break; - case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR: + case I40E_AQ_CAP_ID_FLOW_DIRECTOR: p->fd = true; p->fd_filters_guaranteed = number; p->fd_filters_best_effort = logical_id; break; - case I40E_DEV_FUNC_CAP_WR_CSR_PROT: + case I40E_AQ_CAP_ID_WSR_PROT: p->wr_csr_prot = (u64)number; p->wr_csr_prot |= (u64)logical_id << 32; break; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index f5b2b369dc7c..0d3bc3be0527 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -419,6 +419,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_LED 0x0061 #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 +#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 -- cgit v1.2.3 From 10311540fab76c7e5530bf5f0267a3d1b8d5818e Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 9 Dec 2015 15:50:30 -0800 Subject: i40evf: null out ring pointers on free Since we check these ring pointers to make sure we don't double-allocate or double-free the rings, we had better null them out after we free them. In very rare cases this can cause a panic if the driver is removed during reset recovery. 
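The fix is the standard free-and-NULL pattern so the free path stays safe to re-enter. Sketch:

    kfree(adapter->tx_rings);
    adapter->tx_rings = NULL;   /* later alloc/free checks see NULL
                                 * rather than a dangling pointer */
    kfree(adapter->rx_rings);
    adapter->rx_rings = NULL;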
Change-ID: Ib06eb4910a3058275c8f7ec5ef7f45baa4674f96 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index d1c4335114fc..81d958422ccf 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1122,7 +1122,9 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter) if (!adapter->vsi_res) return; kfree(adapter->tx_rings); + adapter->tx_rings = NULL; kfree(adapter->rx_rings); + adapter->rx_rings = NULL; } /** -- cgit v1.2.3 From 8eed76fa4885f1ed9f19f4d3a16dd24cebf09c19 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 9 Dec 2015 15:50:31 -0800 Subject: i40e: Cleanup the code with respect to restarting autoneg The restart-autoneg work around does not apply to X722. Added a flag to set it only for the right MAC and right FW version where the work around should be applied. Signed-off-by: Anjali Singhai Jain Change-ID: I942c3ff40cccd1e56f424b1da776b020fe3c9d2a Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 47f6c0a9c9fe..53ed3bdd8363 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -340,6 +340,7 @@ struct i40e_pf { #define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE BIT_ULL(41) #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) #define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) +#define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44) #define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d078a6331365..1a7022ca4750 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6889,8 +6889,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) wr32(hw, I40E_REG_MSS, val); } - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { msleep(75); ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (ret) @@ -8367,6 +8366,12 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } + if (((pf->hw.mac.type == I40E_MAC_X710) || + (pf->hw.mac.type == I40E_MAC_XL710)) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || + (pf->hw.aq.fw_maj_ver < 4))) + pf->flags |= I40E_FLAG_RESTART_AUTONEG; + if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; pf->flags |= I40E_FLAG_VMDQ_ENABLED; @@ -10904,8 +10909,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) wr32(hw, I40E_REG_MSS, val); } - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_RESTART_AUTONEG) { msleep(75); err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); if (err) -- cgit v1.2.3 From 5afdaaa0555257f3c42b141908567d40aca0e1d1 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 10 Dec 2015 11:38:50 -0800 Subject: i40e: 
update features with right offload Synchronize code bases and add SCTP offload support. Change-ID: I9f99071f7176225479026930c387bf681a47494e Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1a7022ca4750..486ae1604138 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8947,11 +8947,11 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_RXCSUM | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_GRE | + NETIF_F_TSO | + 0; netdev->features = NETIF_F_SG | NETIF_F_IP_CSUM | -- cgit v1.2.3 From 4f9e697ebbf40ba482ad0481da1e978440f2a53e Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Thu, 10 Dec 2015 11:38:51 -0800 Subject: i40e: bump version to 1.4.10 Bump. Change-ID: Ic9a495feb9ab0606f953c3848b0acf67169d3930 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 486ae1604138..c88583e112fa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -51,7 +51,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 8 +#define DRV_VERSION_BUILD 10 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN -- cgit v1.2.3 From 35dae51de3e99db10f355642f5fc67719b93f558 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 22 Dec 2015 14:25:03 -0800 Subject: i40e: add new device IDs for X722 Add the KX and QSFP device IDs for X722. 
Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 2 ++ drivers/net/ethernet/intel/i40e/i40e_devids.h | 2 ++ drivers/net/ethernet/intel/i40e/i40e_main.c | 2 ++ 3 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 4bdb08bb27d7..3b03a3165ca7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -55,6 +55,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_20G_KR2_A: hw->mac.type = I40E_MAC_XL710; break; + case I40E_DEV_ID_KX_X722: + case I40E_DEV_ID_QSFP_X722: case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index 448ef4c17efb..f7ce5c7c9003 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -41,6 +41,8 @@ #define I40E_DEV_ID_10G_BASE_T4 0x1589 #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 +#define I40E_DEV_ID_KX_X722 0x37CE +#define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c88583e112fa..b3e671bb83ff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = { {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0}, + {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0}, {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0}, -- cgit v1.2.3 From 95a73780921aecc5e66022e000fcf8aeecfb53cf Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 22 Dec 2015 14:25:04 -0800 Subject: i40e: Extend ethtool RSS hooks for X722 This patch adds another way to access the RSS keys and lut using the AQ for X722 devices. 
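The read path now mirrors the existing write path: a single top-level helper that dispatches on the AQ-capable flag and falls back to register access otherwise. Summarized from the hunk below:

    if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
            return i40e_get_rss_aq(vsi, seed, lut, lut_size);
    else
            return i40e_get_rss_reg(vsi, seed, lut, lut_size);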
Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 53 ++++++++++++++++++++++++++++- 1 file changed, 52 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b3e671bb83ff..bd81a9770c5b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7936,6 +7936,52 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi) return ret; } +/** + * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands + * @vsi: Pointer to vsi structure + * @seed: Buffter to store the hash keys + * @lut: Buffer to store the lookup table entries + * @lut_size: Size of buffer to store the lookup table entries + * + * Return 0 on success, negative on failure + */ +static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed, + u8 *lut, u16 lut_size) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; + int ret = 0; + + if (seed) { + ret = i40e_aq_get_rss_key(hw, vsi->id, + (struct i40e_aqc_get_set_rss_key_data *)seed); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot get RSS key, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + + if (lut) { + bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false; + + ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size); + if (ret) { + dev_info(&pf->pdev->dev, + "Cannot get RSS lut, err %s aq_err %s\n", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, + pf->hw.aq.asq_last_status)); + return ret; + } + } + + return ret; +} + /** * i40e_config_rss_reg - Configure RSS keys and lut by writing registers * @vsi: Pointer to vsi structure @@ -8038,7 +8084,12 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) */ int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size) { - return i40e_get_rss_reg(vsi, seed, lut, lut_size); + struct i40e_pf *pf = vsi->back; + + if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) + return i40e_get_rss_aq(vsi, seed, lut, lut_size); + else + return i40e_get_rss_reg(vsi, seed, lut, lut_size); } /** -- cgit v1.2.3 From 3d0da5b78262c1f86294419c7a70e4c837aca159 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 22 Dec 2015 14:25:05 -0800 Subject: i40e/i40evf: Fix for UDP/TCP RSS for X722 The PCTYPES for the X710 and X722 families are different. This patch makes adjustments for that. 
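The shape of the change is the same on the PF and VF sides: when the expanded-PCTYPE capability is present, OR in the extra X722 PCTYPE bits alongside the classic ones. A condensed sketch of the TCP/IPv4 case (flag and bit names as used in the hunks below):

    if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
            if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
                    hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);

            hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
    } else {
            return -EINVAL;
    }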
Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 18 ++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6 +++++ drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 29 +++++++++++++++++++--- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 8 +++--- 4 files changed, 54 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 252a9dd941d4..8a3f93ddbcc3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2168,6 +2168,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case 0: return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); break; default: @@ -2179,6 +2183,10 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case 0: return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); break; default: @@ -2190,6 +2198,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case 0: return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); break; @@ -2202,6 +2215,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) case 0: return -EINVAL; case (RXH_L4_B_0_1 | RXH_L4_B_2_3): + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 63e62f9aec6e..86aacb9f4d44 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1213,6 +1213,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG; } + if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2; + } + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index 8906785446ab..bd1c2728bc5c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -459,6 +459,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, struct ethtool_rxnfc *nfc) { struct i40e_hw *hw = &adapter->hw; + u32 flags = adapter->vf_res->vf_offload_flags; u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | 
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); @@ -477,19 +478,34 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, switch (nfc->flow_type) { case TCP_V4_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP); - else + } else { return -EINVAL; + } break; case TCP_V6_FLOW: - if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) + if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP); - else + } else { return -EINVAL; + } break; case UDP_V4_FLOW: if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4)); } else { @@ -498,6 +514,11 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter, break; case UDP_V6_FLOW: if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) { + if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena |= + BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | + BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); + hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6)); } else { diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 81d958422ccf..798f0dedf14f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1456,7 +1456,11 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter) int ret; /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */ - hena = I40E_DEFAULT_RSS_HENA; + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2) + hena = I40E_DEFAULT_RSS_HENA_EXPANDED; + else + hena = I40E_DEFAULT_RSS_HENA; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); @@ -2507,8 +2511,6 @@ static void i40evf_init_task(struct work_struct *work) if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; - if (!RSS_AQ(adapter)) - i40evf_init_rss(adapter); err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; -- cgit v1.2.3 From f6d83d1376f496e6218080dd6eb663830672813f Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Tue, 22 Dec 2015 14:25:07 -0800 Subject: i40evf: add new write-back mode Add write-back on interrupt throttle rate timer expiration support for the i40evf driver, when running on X722 devices. 
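The write-back-on-ITR mechanism added below works by checking, at the end of Tx cleanup, how many descriptors are still waiting to be written back; if fewer than a stride's worth remain while the ring is otherwise busy, the driver arms a forced write-back so completions are not stalled while interrupts stay masked. A condensed sketch of that decision, using the same fields and pending-count helper the patch relies on (the wrapper function is illustrative only):

	/* Sketch: decide whether to force a descriptor write-back when the
	 * ring runs in WB_ON_ITR mode. pending in [1, WB_STRIDE] means less
	 * than a full stride of descriptors is still outstanding.
	 */
	static void i40evf_maybe_arm_wb(struct i40e_ring *tx_ring, int budget)
	{
		unsigned int pending;

		if (!(tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR))
			return;

		pending = i40evf_get_tx_pending(tx_ring);

		if (budget && pending && pending <= WB_STRIDE &&
		    !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
		    I40E_DESC_UNUSED(tx_ring) != tx_ring->count)
			tx_ring->arm_wb = true;
	}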
Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6 ++++++ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 16 ++++++++++++++++ drivers/net/ethernet/intel/i40evf/i40evf_main.c | 5 +++++ 3 files changed, 27 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 86aacb9f4d44..659d78270fdb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1222,6 +1222,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg) if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING; + if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) { + if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + vfres->vf_offload_flags |= + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR; + } + vfres->num_vsis = num_vsis; vfres->num_queue_pairs = vf->num_queue_pairs; vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7a00657dacda..7d663fb61927 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -252,6 +252,22 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) { + unsigned int j = 0; + /* check to see if there are < 4 descriptors + * waiting to be written back, then kick the hardware to force + * them to be written back in case we stay in NAPI. + * In this mode on X722 we do not enable Interrupt. + */ + j = i40evf_get_tx_pending(tx_ring); + + if (budget && + ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + } + netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), total_packets, total_bytes); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 798f0dedf14f..615ad0f1fdc4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2511,6 +2511,11 @@ static void i40evf_init_task(struct work_struct *work) if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + + if (adapter->vf_res->vf_offload_flags & + I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) + adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; + err = i40evf_request_misc_irq(adapter); if (err) goto err_sw_init; -- cgit v1.2.3 From 2803b16c10ea7eec170c485388f5f26ae30e92fe Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 22 Dec 2015 14:25:08 -0800 Subject: i40e/i40evf: Use private workqueue As done per ixgbe, use a private workqueue to avoid blocking the system workqueue. This avoids some strange side effects when some other entity is depending on the system work queue. 
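The private-workqueue conversion that follows is the standard pattern: allocate a single-threaded workqueue at module init, redirect schedule_work() calls to queue_work() on that queue, and destroy the queue at module exit. A generic sketch of the lifecycle (the names are placeholders, not the i40e/i40evf symbols):

	/* Sketch: driver-private workqueue so service work no longer rides
	 * on the shared system workqueue.
	 */
	#include <linux/module.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static int __init my_init(void)
	{
		my_wq = create_singlethread_workqueue("my_driver");
		if (!my_wq)
			return -ENOMEM;
		return 0;
	}

	static void my_kick_service(struct work_struct *work)
	{
		queue_work(my_wq, work);	/* instead of schedule_work(work) */
	}

	static void __exit my_exit(void)
	{
		destroy_workqueue(my_wq);
	}

	module_init(my_init);
	module_exit(my_exit);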
Change-ID: Ic8ba08f5b03696cf638b21afd25fbae7738d55ee Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 15 ++++++++++++++- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 10 +++++++++- 2 files changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bd81a9770c5b..3e482bcd5287 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -112,6 +112,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *i40e_wq; + /** * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -297,7 +299,7 @@ static void i40e_service_event_schedule(struct i40e_pf *pf) if (!test_bit(__I40E_DOWN, &pf->state) && !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) - schedule_work(&pf->service_task); + queue_work(i40e_wq, &pf->service_task); } /** @@ -11470,6 +11472,16 @@ static int __init i40e_init_module(void) i40e_driver_string, i40e_driver_version_str); pr_info("%s: %s\n", i40e_driver_name, i40e_copyright); + /* we will see if single thread per module is enough for now, + * it can't be any worse than using the system workqueue which + * was already single threaded + */ + i40e_wq = create_singlethread_workqueue(i40e_driver_name); + if (!i40e_wq) { + pr_err("%s: Failed to create workqueue\n", i40e_driver_name); + return -ENOMEM; + } + i40e_dbg_init(); return pci_register_driver(&i40e_driver); } @@ -11484,6 +11496,7 @@ module_init(i40e_init_module); static void __exit i40e_exit_module(void) { pci_unregister_driver(&i40e_driver); + destroy_workqueue(i40e_wq); i40e_dbg_exit(); } module_exit(i40e_exit_module); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 615ad0f1fdc4..66964eb6b7de 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -69,6 +69,8 @@ MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); +static struct workqueue_struct *i40evf_wq; + /** * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code * @hw: pointer to the HW structure @@ -182,7 +184,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - schedule_work(&adapter->reset_task); + queue_work(i40evf_wq, &adapter->reset_task); } } @@ -2895,6 +2897,11 @@ static int __init i40evf_init_module(void) pr_info("%s\n", i40evf_copyright); + i40evf_wq = create_singlethread_workqueue(i40evf_driver_name); + if (!i40evf_wq) { + pr_err("%s: Failed to create workqueue\n", i40evf_driver_name); + return -ENOMEM; + } ret = pci_register_driver(&i40evf_driver); return ret; } @@ -2910,6 +2917,7 @@ module_init(i40evf_init_module); static void __exit i40evf_exit_module(void) { pci_unregister_driver(&i40evf_driver); + destroy_workqueue(i40evf_wq); } module_exit(i40evf_exit_module); -- cgit v1.2.3 From 4ba40bcea7046a1fa9b57a8cb4bcb8776e86e7ed Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 22 Dec 2015 14:25:09 -0800 Subject: i40e: add new proxy-wol bit for 
X722 Add the new proxy-wake-on-lan capability bit available with the new X722 device. Signed-off-by: Shannon Nelson Acked-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 1 + drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 256ce6549f61..bff09957cdc0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -402,6 +402,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 0d3bc3be0527..365a7d6231a0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -399,6 +399,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 #define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 +#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 #define I40E_AQ_CAP_ID_SRIOV 0x0012 #define I40E_AQ_CAP_ID_VF 0x0013 #define I40E_AQ_CAP_ID_VMDQ 0x0014 -- cgit v1.2.3 From 6dfae3892473e91080accdb2da8476481393f769 Mon Sep 17 00:00:00 2001 From: Greg Bowers Date: Tue, 22 Dec 2015 14:25:10 -0800 Subject: i40e: Limit DCB FW version checks to X710/XL710 devices X710/XL710 devices require FW version checks to properly handle DCB configurations from the FW. Newer devices do not, so limit these checks to X710/XL710. 
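The restriction described here reduces to a single gate: consult the firmware major/minor version only when the MAC is an X710/XL710 part, and otherwise fall straight through to the normal CEE query. A condensed sketch of that predicate (the helper name is illustrative):

	/* Sketch: the pre-v4.33 IEEE-only quirk applies to X710/XL710 only. */
	static bool i40e_dcb_needs_ieee_only(struct i40e_hw *hw)
	{
		if (hw->mac.type != I40E_MAC_XL710)
			return false;	/* newer MACs have no FW-version quirk */

		return hw->aq.fw_maj_ver < 4 ||
		       (hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver < 33);
	}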
Signed-off-by: Greg Bowers Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 2691277c0055..582daa7ad776 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -814,13 +814,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw) struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg; struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg; - /* If Firmware version < v4.33 IEEE only */ - if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || - (hw->aq.fw_maj_ver < 4)) + /* If Firmware version < v4.33 on X710/XL710, IEEE only */ + if ((hw->mac.type == I40E_MAC_XL710) && + (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || + (hw->aq.fw_maj_ver < 4))) return i40e_get_ieee_dcb_config(hw); - /* If Firmware version == v4.33 use old CEE struct */ - if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) { + /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */ + if ((hw->mac.type == I40E_MAC_XL710) && + ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) { ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg, sizeof(cee_v1_cfg), NULL); if (!ret) { -- cgit v1.2.3 From 5394f02f0c0553f97bb4c5596a34c9a7333c032b Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 22 Dec 2015 14:25:11 -0800 Subject: i40e: AQ Add Run PHY Activity struct Add the AQ opcode and struct definitions for the Run PHY Activity command Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 13 +++++++++++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 13 +++++++++++++ 2 files changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index bff09957cdc0..9e340ca03440 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -220,6 +220,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -1825,6 +1826,18 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 }; +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 365a7d6231a0..51d83c634519 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -220,6 +220,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_get_phy_wol_caps = 0x0621, i40e_aqc_opc_set_phy_debug = 0x0622, i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, /* NVM commands */ i40e_aqc_opc_nvm_read = 0x0701, @@ -1822,6 +1823,18 @@ enum i40e_aq_phy_reg_type { I40E_AQC_PHY_REG_EXERNAL_MODULE = 
0x3 }; +/* Run PHY Activity (0x0626) */ +struct i40e_aqc_run_phy_activity { + __le16 activity_id; + u8 flags; + u8 reserved1; + __le32 control; + __le32 data; + u8 reserved2[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); + /* NVM Read command (indirect 0x0701) * NVM Erase commands (direct 0x0702) * NVM Update commands (indirect 0x0703) -- cgit v1.2.3 From 5926425368ba8f1b186d45d96020c288e3bb9b8d Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 22 Dec 2015 14:25:12 -0800 Subject: i40e: AQ Geneve cloud tunnel type Fix the name of the new cloud tunnel type from the place-holder NGE name to the official Geneve. Also fix the spelling of the VXLAN type. Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Signed-off-by: Jeff Kirsher Tested-by: Andrew Bowers --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 4 ++-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 9e340ca03440..eab55eacb01c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1260,9 +1260,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 __le32 tenant_id; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 51d83c634519..30b5a33aa424 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1257,9 +1257,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN 0 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 __le32 tenant_id; -- cgit v1.2.3 From 5eb772f7ca86267565ef40c7b987c88405689b96 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Tue, 22 Dec 2015 14:25:13 -0800 Subject: i40e: AQ Add external power class to get link status Add the new External Device Power Ability field to the get_link_status data structure, using space from the reserved field at the end of the struct. 
Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 7 ++++++- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index eab55eacb01c..0e608d2a70d5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1758,7 +1758,12 @@ struct i40e_aqc_get_link_status { u8 config; #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 30b5a33aa424..578b1780fb08 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1755,7 +1755,12 @@ struct i40e_aqc_get_link_status { u8 config; #define I40E_AQ_CONFIG_CRC_ENA 0x04 #define I40E_AQ_CONFIG_PACING_MASK 0x78 - u8 reserved[5]; + u8 external_power_ability; +#define I40E_AQ_LINK_POWER_CLASS_1 0x00 +#define I40E_AQ_LINK_POWER_CLASS_2 0x01 +#define I40E_AQ_LINK_POWER_CLASS_3 0x02 +#define I40E_AQ_LINK_POWER_CLASS_4 0x03 + u8 reserved[4]; }; I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); -- cgit v1.2.3 From f8db54cc4df7b065b0028f8c919e2f47983f2043 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Tue, 22 Dec 2015 14:25:14 -0800 Subject: i40e: add 100Mb ethtool reporting Add some missing reporting/advertisement of 100Mb capability for adapters that support it. 
Change-ID: I8b8523fbdc99517bec29d90c71b3744db11542ac Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 4 ++++ drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 8a3f93ddbcc3..45495911c5a4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -411,6 +411,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw, if (pf->hw.mac.type == I40E_MAC_X722) { ecmd->supported |= SUPPORTED_100baseT_Full; ecmd->advertising |= ADVERTISED_100baseT_Full; + if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) { + ecmd->supported |= SUPPORTED_100baseT_Full; + ecmd->advertising |= ADVERTISED_100baseT_Full; + } } } if (phy_types & I40E_CAP_PHY_TYPE_XAUI || diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3e482bcd5287..320b0491abd9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8453,6 +8453,7 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_100M_SGMII_CAPABLE | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } pf->eeprom_version = 0xDEAD; -- cgit v1.2.3 From 296d48568042360d0e2a6e6e91b0130acb5ca738 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 27 Jan 2016 23:33:28 -0800 Subject: ipvlan: inherit MTU from master device When we create IPvlan slave; we use ether_setup() and that sets up default MTU to 1500 while the master device may have lower / different MTU. Any subsequent changes to the masters' MTU are reflected into the slaves' MTU setting. However if those don't happen (most likely scenario), the slaves' MTU stays at 1500 which could be bad. This change adds code to inherit MTU from the master device instead of using the default value during the link initialization phase. Signed-off-by: Mahesh Bandewar CC: Eric Dumazet CC: Tim Hockins Acked-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index f94392d07126..7a3b41468a55 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -468,6 +468,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->dev = dev; ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; + ipvlan_adjust_mtu(ipvlan, phy_dev); INIT_LIST_HEAD(&ipvlan->addrs); /* TODO Probably put random address here to be presented to the -- cgit v1.2.3 From bb63daf9efb4f2bcb657d7179a53bd808f978dc9 Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Mon, 1 Feb 2016 18:51:06 -0500 Subject: team: track sum of rx_nohandler for all slaves CC: Jiri Pirko CC: netdev@vger.kernel.org Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/team/team.c | 10 +++++++--- include/linux/if_team.h | 1 + 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 718ceeab4dbc..00558e139584 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -758,6 +758,8 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) u64_stats_update_end(&pcpu_stats->syncp); skb->dev = team->dev; + } else if (res == RX_HANDLER_EXACT) { + this_cpu_inc(team->pcpu_stats->rx_nohandler); } else { this_cpu_inc(team->pcpu_stats->rx_dropped); } @@ -1807,7 +1809,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) struct team *team = netdev_priv(dev); struct team_pcpu_stats *p; u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; - u32 rx_dropped = 0, tx_dropped = 0; + u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0; unsigned int start; int i; @@ -1828,14 +1830,16 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; /* - * rx_dropped & tx_dropped are u32, updated - * without syncp protection. + * rx_dropped, tx_dropped & rx_nohandler are u32, + * updated without syncp protection. */ rx_dropped += p->rx_dropped; tx_dropped += p->tx_dropped; + rx_nohandler += p->rx_nohandler; } stats->rx_dropped = rx_dropped; stats->tx_dropped = tx_dropped; + stats->rx_nohandler = rx_nohandler; return stats; } diff --git a/include/linux/if_team.h b/include/linux/if_team.h index b84e49c3a738..174f43f43aff 100644 --- a/include/linux/if_team.h +++ b/include/linux/if_team.h @@ -24,6 +24,7 @@ struct team_pcpu_stats { struct u64_stats_sync syncp; u32 rx_dropped; u32 tx_dropped; + u32 rx_nohandler; }; struct team; -- cgit v1.2.3 From f344b0d940d2da88c23b864f818da43081ce300f Mon Sep 17 00:00:00 2001 From: Jarod Wilson Date: Mon, 1 Feb 2016 18:51:07 -0500 Subject: bond: track sum of rx_nohandler for all slaves Sample output with this set applied for an active-backup bond: $ cat /sys/devices/virtual/net/bond0/lower_p7p1/statistics/rx_nohandler 16568 $ cat /sys/devices/virtual/net/bond0/lower_p5p2/statistics/rx_nohandler 16583 $ cat /sys/devices/virtual/net/bond0/statistics/rx_nohandler 33151 CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek CC: netdev@vger.kernel.org Signed-off-by: Jarod Wilson Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 56b560558884..6587929b040a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3309,6 +3309,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes; stats->rx_errors += sstats->rx_errors - pstats->rx_errors; stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped; + stats->rx_nohandler += sstats->rx_nohandler - pstats->rx_nohandler; stats->tx_packets += sstats->tx_packets - pstats->tx_packets;; stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes; -- cgit v1.2.3 From 387b75f8b31437792e8334390fdf5cf060d1e3da Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 2 Feb 2016 07:47:14 +0100 Subject: bgmac: add helper checking for BCM4707 / BCM53018 chip id MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Chipsets with BCM4707 / BCM53018 ID require special handling at a few places in the code. It's likely there will be more IDs to check in the future. To simplify it add this trivial helper. Signed-off-by: Rafał Miłecki Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bgmac.c | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 06f6cffdfaf5..230f8e6209e5 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -26,6 +26,17 @@ static const struct bcma_device_id bgmac_bcma_tbl[] = { }; MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); +static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) +{ + switch (bgmac->core->bus->chipinfo.id) { + case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM53018: + return true; + default: + return false; + } +} + static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { @@ -987,11 +998,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac) static void bgmac_miiconfig(struct bgmac *bgmac) { struct bcma_device *core = bgmac->core; - struct bcma_chipinfo *ci = &core->bus->chipinfo; u8 imode; - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) { + if (bgmac_is_bcm4707_family(bgmac)) { bcma_awrite32(core, BCMA_IOCTL, bcma_aread32(core, BCMA_IOCTL) | 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN); @@ -1055,9 +1064,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac) } /* Request Misc PLL for corerev > 2 */ - if (core->id.rev > 2 && - ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM53018) { + if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) { bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, @@ -1193,8 +1200,7 @@ static void bgmac_enable(struct bgmac *bgmac) break; } - if (ci->id != BCMA_CHIP_ID_BCM4707 && - ci->id != BCMA_CHIP_ID_BCM53018) { + if (!bgmac_is_bcm4707_family(bgmac)) { rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / @@ -1472,14 +1478,12 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac) static int bgmac_mii_register(struct bgmac *bgmac) { - struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo; struct mii_bus *mii_bus; struct phy_device *phy_dev; char 
bus_id[MII_BUS_ID_SIZE + 3]; int err = 0; - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) + if (bgmac_is_bcm4707_family(bgmac)) return bgmac_fixed_phy_register(bgmac); mii_bus = mdiobus_alloc(); @@ -1539,7 +1543,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac) /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { - struct bcma_chipinfo *ci = &core->bus->chipinfo; struct net_device *net_dev; struct bgmac *bgmac; struct ssb_sprom *sprom = &core->bus->sprom; @@ -1620,8 +1623,7 @@ static int bgmac_probe(struct bcma_device *core) bgmac_chip_reset(bgmac); /* For Northstar, we have to take all GMAC core out of reset */ - if (ci->id == BCMA_CHIP_ID_BCM4707 || - ci->id == BCMA_CHIP_ID_BCM53018) { + if (bgmac_is_bcm4707_family(bgmac)) { struct bcma_device *ns_core; int ns_gmac; -- cgit v1.2.3 From c6140a299bdf2425d092383fafaaf2a4a3745989 Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Tue, 2 Feb 2016 08:32:55 +0000 Subject: bonding: add slave device name for debug netdev_dbg() will add bond device name, it will be helpful if we print slave device name. Signed-off-by: Zhang Shengju Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6587929b040a..7c9eb6704a7d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -618,8 +618,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active, static void bond_set_dev_addr(struct net_device *bond_dev, struct net_device *slave_dev) { - netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n", - bond_dev, slave_dev, slave_dev->addr_len); + netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n", + bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len); memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len); bond_dev->addr_assign_type = NET_ADDR_STOLEN; call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); -- cgit v1.2.3 From 1b3b36b044939b438c4463e8bde8cd644ba82032 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 16 Jan 2016 00:48:32 +0100 Subject: bcma: support identifying MX25L25635F serial flash MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's a Macronix 32 MiB flash found on board with BCM47189 SoC. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_chipcommon_sflash.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c index 7e11ef4cb7db..04d706ca5f43 100644 --- a/drivers/bcma/driver_chipcommon_sflash.c +++ b/drivers/bcma/driver_chipcommon_sflash.c @@ -38,6 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = { { "M25P32", 0x15, 0x10000, 64, }, { "M25P64", 0x16, 0x10000, 128, }, { "M25FL128", 0x17, 0x10000, 256, }, + { "MX25L25635F", 0x18, 0x10000, 512, }, { NULL }, }; -- cgit v1.2.3 From 7267bcda332e2782e21a559f3b1b859a35b4062d Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 16 Jan 2016 00:48:52 +0100 Subject: bcma: identify bus cores (devices) found on BCM47189 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add missing defines and print proper names. 
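Printing "proper names" here means the scan code can resolve the newly defined core IDs through its device-name table; the lookup is a plain linear search over that table, roughly as sketched below (illustrative only, not the exact scan.c implementation):

	/* Sketch: translate a discovered core ID into a printable name,
	 * falling back to a generic string when the table has no match.
	 */
	static const char *core_id_to_name(const struct bcma_device_id_name *tbl,
					   size_t n, u16 id)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (tbl[i].id == id)
				return tbl[i].name;
		return "UNKNOWN";
	}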
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/scan.c | 3 +++ include/linux/bcma/bcma.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index df806b9c5490..5ee731132365 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -98,6 +98,9 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = { { BCMA_CORE_SHIM, "SHIM" }, { BCMA_CORE_PCIE2, "PCIe Gen2" }, { BCMA_CORE_ARM_CR4, "ARM CR4" }, + { BCMA_CORE_GCI, "GCI" }, + { BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" }, + { BCMA_CORE_ARM_CA7, "ARM CA7" }, { BCMA_CORE_DEFAULT, "Default" }, }; diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 3feb1b2d75d8..991ebb4c2015 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -151,6 +151,8 @@ struct bcma_host_ops { #define BCMA_CORE_PCIE2 0x83C /* PCI Express Gen2 */ #define BCMA_CORE_USB30_DEV 0x83D #define BCMA_CORE_ARM_CR4 0x83E +#define BCMA_CORE_GCI 0x840 +#define BCMA_CORE_CMEM 0x846 /* CNDS DDR2/3 memory controller */ #define BCMA_CORE_ARM_CA7 0x847 #define BCMA_CORE_SYS_MEM 0x849 #define BCMA_CORE_DEFAULT 0xFFF -- cgit v1.2.3 From 0c06f5d43e1f969bae3fa7832a94af46c75db9a7 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sat, 16 Jan 2016 00:48:53 +0100 Subject: bcma: support chipsets with PMU and GCI cores (devices) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Both cores are another exceptions. They are not accessed in a standard way and to they don't need or have wrapping addresses. This fixes bus scanning after finding such core. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/scan.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c index 5ee731132365..4a2d1b235fb5 100644 --- a/drivers/bcma/scan.c +++ b/drivers/bcma/scan.c @@ -318,6 +318,8 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr, switch (core->id.id) { case BCMA_CORE_4706_MAC_GBIT_COMMON: case BCMA_CORE_NS_CHIPCOMMON_B: + case BCMA_CORE_PMU: + case BCMA_CORE_GCI: /* Not used yet: case BCMA_CORE_OOB_ROUTER: */ break; default: -- cgit v1.2.3 From 67edf354faaf93156646e741483b2313bc756c0f Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 19 Jan 2016 08:45:25 +0100 Subject: bcma: use _PMU_ in all names of PMU registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PMU (Power Management Unit) seems to be a separated piece of hardware, just accessed using ChipCommon core registers. In recent Broadcom chipsets PMU is not bounded to CC but available as separated core. To make code cleaner & easier to review (for a correct R/W access) use clearer names. 
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_chipcommon_pmu.c | 46 ++++++++++++++--------------- drivers/net/wireless/broadcom/b43/main.c | 8 ++--- include/linux/bcma/bcma_driver_chipcommon.h | 12 ++++---- 3 files changed, 33 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index fe0d48cb1778..472f39dc5a38 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c @@ -15,44 +15,44 @@ u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) { - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); - return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); + return bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); } EXPORT_SYMBOL_GPL(bcma_chipco_pll_read); void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) { - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); } EXPORT_SYMBOL_GPL(bcma_chipco_pll_write); void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, u32 set) { - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR); - bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); + bcma_cc_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set); } EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset); void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, u32 set) { - bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR); - bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set); + bcma_cc_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR); + bcma_cc_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set); } EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset); void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, u32 set) { - bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR); - bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set); + bcma_cc_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset); + bcma_cc_read32(cc, BCMA_CC_PMU_REGCTL_ADDR); + bcma_cc_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set); } EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); @@ -472,8 +472,8 @@ u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc) static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) { - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset); - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); } void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid) @@ -497,20 +497,20 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid) bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 
6 : 0; /* RMW only the P1 divider */ - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, BCMA_CC_PMU_PLL_CTL0 + phypll_offset); - tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); + tmp = bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK)); tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT); - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); /* RMW only the int feedback divider */ - bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, BCMA_CC_PMU_PLL_CTL2 + phypll_offset); - tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA); + tmp = bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK); tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT; - bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp); + bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); tmp = BCMA_CC_PMU_CTL_PLL_UPD; break; diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index ec013fbd6a81..c279211e49f9 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -1215,10 +1215,10 @@ void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev) case B43_BUS_BCMA: bcma_cc = &dev->dev->bdev->bus->drv_cc; - bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0); - bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); - bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4); - bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4); + bcma_cc_write32(bcma_cc, BCMA_CC_PMU_CHIPCTL_ADDR, 0); + bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4); + bcma_cc_set32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, 0x4); + bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4); break; #endif #ifdef CONFIG_B43_SSB diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index db51a6ffb7d6..96d8d56f240f 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -351,12 +351,12 @@ #define BCMA_CC_PMU_RES_REQTS 0x0640 /* PMU res req timer sel */ #define BCMA_CC_PMU_RES_REQT 0x0644 /* PMU res req timer */ #define BCMA_CC_PMU_RES_REQM 0x0648 /* PMU res req mask */ -#define BCMA_CC_CHIPCTL_ADDR 0x0650 -#define BCMA_CC_CHIPCTL_DATA 0x0654 -#define BCMA_CC_REGCTL_ADDR 0x0658 -#define BCMA_CC_REGCTL_DATA 0x065C -#define BCMA_CC_PLLCTL_ADDR 0x0660 -#define BCMA_CC_PLLCTL_DATA 0x0664 +#define BCMA_CC_PMU_CHIPCTL_ADDR 0x0650 +#define BCMA_CC_PMU_CHIPCTL_DATA 0x0654 +#define BCMA_CC_PMU_REGCTL_ADDR 0x0658 +#define BCMA_CC_PMU_REGCTL_DATA 0x065C +#define BCMA_CC_PMU_PLLCTL_ADDR 0x0660 +#define BCMA_CC_PMU_PLLCTL_DATA 0x0664 #define BCMA_CC_PMU_STRAPOPT 0x0668 /* (corerev >= 28) */ #define BCMA_CC_PMU_XTAL_FREQ 0x066C /* (pmurev >= 10) */ #define BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK 0x00001FFF -- cgit v1.2.3 From b3c47afbf54d86daa0473895e8ca9e8b663f5c1a Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 19 Jan 2016 08:45:26 +0100 Subject: bcma: support PMU present as separated bus core MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On recent Broadcom chipsets PMU is present as separated core and it can't be accessed using ChipCommon anymore as it fails with e.g.: [ 0.000577] Unhandled fault: external abort on non-linefetch (0x1008) at 0xf1000604 Solve it by using a new (PMU) core pointer set to ChipCommon or PMU 
depending on the hardware capabilities. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_chipcommon.c | 2 +- drivers/bcma/driver_chipcommon_pmu.c | 94 ++++++++++++++++------------- include/linux/bcma/bcma_driver_chipcommon.h | 19 ++++++ 3 files changed, 72 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index b7c8a8d4e6d1..36ee221e298f 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -185,7 +185,7 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks) ticks = 2; else if (ticks > maxt) ticks = maxt; - bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks); + bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks); } else { struct bcma_bus *bus = cc->core->bus; diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c index 472f39dc5a38..f1eb4d3e1d57 100644 --- a/drivers/bcma/driver_chipcommon_pmu.c +++ b/drivers/bcma/driver_chipcommon_pmu.c @@ -15,44 +15,44 @@ u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset) { - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); - return bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); + return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); } EXPORT_SYMBOL_GPL(bcma_chipco_pll_read); void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) { - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); } EXPORT_SYMBOL_GPL(bcma_chipco_pll_write); void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, u32 set) { - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); - bcma_cc_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR); + bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set); } EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset); void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, u32 set) { - bcma_cc_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR); - bcma_cc_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set); + bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset); + bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR); + bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set); } EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset); void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask, u32 set) { - bcma_cc_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset); - bcma_cc_read32(cc, BCMA_CC_PMU_REGCTL_ADDR); - bcma_cc_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set); + bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset); + bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR); + bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set); } EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset); @@ -60,18 +60,18 @@ static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc) { u32 ilp_ctl, alp_hz; - if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) & + if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) & 
BCMA_CC_PMU_STAT_EXT_LPO_AVAIL)) return 0; - bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, - BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT)); + bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, + BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT)); usleep_range(1000, 2000); - ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ); + ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ); ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK; - bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0); + bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0); alp_hz = ilp_ctl * 32768 / 4; return (alp_hz + 50000) / 100000 * 100; @@ -127,8 +127,8 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq) mask = (u32)~(BCMA_RES_4314_HT_AVAIL | BCMA_RES_4314_MACPHY_CLK_AVAIL); - bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask); - bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask); + bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask); + bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask); bcma_wait_value(cc->core, BCMA_CLKCTLST, BCMA_CLKCTLST_HAVEHT, 0, 20000); break; @@ -140,7 +140,7 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq) /* Flush */ if (cc->pmu.rev >= 2) - bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD); + bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD); /* TODO: Do we need to update OTP? */ } @@ -195,9 +195,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc) /* Set the resource masks. */ if (min_msk) - bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk); + bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk); if (max_msk) - bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); + bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk); /* * Add some delay; allow resources to come up and settle. @@ -269,23 +269,33 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc) void bcma_pmu_early_init(struct bcma_drv_cc *cc) { + struct bcma_bus *bus = cc->core->bus; u32 pmucap; - pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP); + if (cc->core->id.rev >= 35 && + cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) { + cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU); + if (!cc->pmu.core) + bcma_warn(bus, "Couldn't find expected PMU core"); + } + if (!cc->pmu.core) + cc->pmu.core = cc->core; + + pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP); cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION); - bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n", - cc->pmu.rev, pmucap); + bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev, + pmucap); } void bcma_pmu_init(struct bcma_drv_cc *cc) { if (cc->pmu.rev == 1) - bcma_cc_mask32(cc, BCMA_CC_PMU_CTL, - ~BCMA_CC_PMU_CTL_NOILPONW); + bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL, + ~BCMA_CC_PMU_CTL_NOILPONW); else - bcma_cc_set32(cc, BCMA_CC_PMU_CTL, - BCMA_CC_PMU_CTL_NOILPONW); + bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, + BCMA_CC_PMU_CTL_NOILPONW); bcma_pmu_pll_init(cc); bcma_pmu_resources_init(cc); @@ -472,8 +482,8 @@ u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc) static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value) { - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value); } void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid) @@ -497,20 +507,20 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid) bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 
6 : 0; /* RMW only the P1 divider */ - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, BCMA_CC_PMU_PLL_CTL0 + phypll_offset); - tmp = bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); + tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK)); tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT); - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); /* RMW only the int feedback divider */ - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, BCMA_CC_PMU_PLL_CTL2 + phypll_offset); - tmp = bcma_cc_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); + tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA); tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK); tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT; - bcma_cc_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); + bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp); tmp = BCMA_CC_PMU_CTL_PLL_UPD; break; @@ -646,7 +656,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid) break; } - tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL); - bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp); + tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL); + bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp); } EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate); diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 96d8d56f240f..700d0c6f7480 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -217,6 +217,11 @@ #define BCMA_CC_CLKDIV_JTAG_SHIFT 8 #define BCMA_CC_CLKDIV_UART 0x000000FF #define BCMA_CC_CAP_EXT 0x00AC /* Capabilities */ +#define BCMA_CC_CAP_EXT_SECI_PRESENT 0x00000001 +#define BCMA_CC_CAP_EXT_GSIO_PRESENT 0x00000002 +#define BCMA_CC_CAP_EXT_GCI_PRESENT 0x00000004 +#define BCMA_CC_CAP_EXT_SECI_PUART_PRESENT 0x00000008 /* UART present */ +#define BCMA_CC_CAP_EXT_AOB_PRESENT 0x00000040 #define BCMA_CC_PLLONDELAY 0x00B0 /* Rev >= 4 only */ #define BCMA_CC_FREFSELDELAY 0x00B4 /* Rev >= 4 only */ #define BCMA_CC_SLOWCLKCTL 0x00B8 /* 6 <= Rev <= 9 only */ @@ -566,6 +571,7 @@ * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU) */ struct bcma_chipcommon_pmu { + struct bcma_device *core; /* Can be separated core or just ChipCommon one */ u8 rev; /* PMU revision */ u32 crystalfreq; /* The active crystal frequency (in kHz) */ }; @@ -660,6 +666,19 @@ struct bcma_drv_cc_b { #define bcma_cc_maskset32(cc, offset, mask, set) \ bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set)) +/* PMU registers access */ +#define bcma_pmu_read32(cc, offset) \ + bcma_read32((cc)->pmu.core, offset) +#define bcma_pmu_write32(cc, offset, val) \ + bcma_write32((cc)->pmu.core, offset, val) + +#define bcma_pmu_mask32(cc, offset, mask) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask)) +#define bcma_pmu_set32(cc, offset, set) \ + bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set)) +#define bcma_pmu_maskset32(cc, offset, mask, set) \ + bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set)) + extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks); extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc); -- cgit v1.2.3 From 36e8072ebeb1981074c7ec1b63bae3ecc3295b4b Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Wed, 20 Jan 2016 16:46:04 +0100 
Subject: brcmfmac: fix setting primary channel for 80 MHz width MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit First of all it changes the way we calculate primary channel offset. If we use e.g. 80 MHz channel with primary frequency 5180 MHz (which means center frequency is 5210 MHz) it makes sense to calculate primary offset as -30 MHz. Then it fixes values we compare primary_offset with. We were comparing offset in MHz against -2 or 2 which was resulting in picking a wrong primary channel. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 23 ++++++++++------------ 1 file changed, 10 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 7b01e4ddb315..d00c5c1d58bf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -247,7 +247,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n", ch->chan->center_freq, ch->center_freq1, ch->width); ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1); - primary_offset = ch->center_freq1 - ch->chan->center_freq; + primary_offset = ch->chan->center_freq - ch->center_freq1; switch (ch->width) { case NL80211_CHAN_WIDTH_20: case NL80211_CHAN_WIDTH_20_NOHT: @@ -256,24 +256,21 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf, break; case NL80211_CHAN_WIDTH_40: ch_inf.bw = BRCMU_CHAN_BW_40; - if (primary_offset < 0) + if (primary_offset > 0) ch_inf.sb = BRCMU_CHAN_SB_U; else ch_inf.sb = BRCMU_CHAN_SB_L; break; case NL80211_CHAN_WIDTH_80: ch_inf.bw = BRCMU_CHAN_BW_80; - if (primary_offset < 0) { - if (primary_offset < -CH_10MHZ_APART) - ch_inf.sb = BRCMU_CHAN_SB_UU; - else - ch_inf.sb = BRCMU_CHAN_SB_UL; - } else { - if (primary_offset > CH_10MHZ_APART) - ch_inf.sb = BRCMU_CHAN_SB_LL; - else - ch_inf.sb = BRCMU_CHAN_SB_LU; - } + if (primary_offset == -30) + ch_inf.sb = BRCMU_CHAN_SB_LL; + else if (primary_offset == -10) + ch_inf.sb = BRCMU_CHAN_SB_LU; + else if (primary_offset == 10) + ch_inf.sb = BRCMU_CHAN_SB_UL; + else + ch_inf.sb = BRCMU_CHAN_SB_UU; break; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: -- cgit v1.2.3 From 4c81acab3816c6941900c2abdb412df6cf34bc74 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Fri, 22 Jan 2016 18:02:54 +0100 Subject: bcma: init serial console directly from ChipCommon code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit UART is connected to and controlled over ChipCommon core. It doesn't have much to do with MIPS core (where we initialize it currently) except just existing on embedded systemms. There isn't point of such cross-core initialization (and we needed #ifdef anyway) so just handle it in ChipCommon. 
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/bcma_private.h | 1 - drivers/bcma/driver_chipcommon.c | 13 ++++++++++--- drivers/bcma/driver_mips.c | 3 --- 3 files changed, 10 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 38f156745d53..7e4ddfb076d3 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -48,7 +48,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); #ifdef CONFIG_BCMA_DRIVER_MIPS -void bcma_chipco_serial_init(struct bcma_drv_cc *cc); extern struct platform_device bcma_pflash_dev; #endif /* CONFIG_BCMA_DRIVER_MIPS */ diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index 36ee221e298f..bdb73d97da63 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -15,6 +15,8 @@ #include #include +static void bcma_chipco_serial_init(struct bcma_drv_cc *cc); + static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset, u32 mask, u32 value) { @@ -115,6 +117,8 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc) void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc) { + struct bcma_bus *bus = cc->core->bus; + if (cc->early_setup_done) return; @@ -129,6 +133,9 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc) if (cc->capabilities & BCMA_CC_CAP_PMU) bcma_pmu_early_init(cc); + if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC) + bcma_chipco_serial_init(cc); + cc->early_setup_done = true; } @@ -314,9 +321,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value) return res; } -#ifdef CONFIG_BCMA_DRIVER_MIPS -void bcma_chipco_serial_init(struct bcma_drv_cc *cc) +static void bcma_chipco_serial_init(struct bcma_drv_cc *cc) { +#if IS_BUILTIN(CONFIG_BCM47XX) unsigned int irq; u32 baud_base; u32 i; @@ -358,5 +365,5 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc) ports[i].baud_base = baud_base; ports[i].reg_shift = 0; } +#endif /* CONFIG_BCM47XX */ } -#endif /* CONFIG_BCMA_DRIVER_MIPS */ diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 24424f3fef96..a40a203314db 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -337,12 +337,9 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) { - struct bcma_bus *bus = mcore->core->bus; - if (mcore->early_setup_done) return; - bcma_chipco_serial_init(&bus->drv_cc); bcma_core_mips_flash_detect(mcore); mcore->early_setup_done = true; -- cgit v1.2.3 From 61dba73cdbba8ec5c01b31beaf9e2debc2d2f273 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sun, 24 Jan 2016 16:37:33 +0100 Subject: bcma: add support for BCM47094 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's another SoC with 32 GPIOs and simplified watchdog handling. It was tested on D-Link DIR-885L. 
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_chipcommon.c | 1 + drivers/bcma/driver_gpio.c | 1 + include/linux/bcma/bcma.h | 1 + 3 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index bdb73d97da63..b0f44a2937b9 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -197,6 +197,7 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks) struct bcma_bus *bus = cc->core->bus; if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 && + bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 && bus->chipinfo.id != BCMA_CHIP_ID_BCM53018) bcma_core_set_clockmode(cc->core, ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC); diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c index 504899a72966..77b0738fbe1b 100644 --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c @@ -197,6 +197,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc) case BCMA_CHIP_ID_BCM4707: case BCMA_CHIP_ID_BCM5357: case BCMA_CHIP_ID_BCM53572: + case BCMA_CHIP_ID_BCM47094: chip->ngpio = 32; break; default: diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 991ebb4c2015..0367c63f5960 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -201,6 +201,7 @@ struct bcma_host_ops { #define BCMA_PKG_ID_BCM4707 1 #define BCMA_PKG_ID_BCM4708 2 #define BCMA_PKG_ID_BCM4709 0 +#define BCMA_CHIP_ID_BCM47094 53030 #define BCMA_CHIP_ID_BCM53018 53018 /* Board types (on PCI usually equals to the subsystem dev id) */ -- cgit v1.2.3 From 515b399c9a204da70d54916d23c1748ff7083fb8 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 26 Jan 2016 17:02:09 +0100 Subject: bcma: claim only 14e4:4365 PCI Dell card with SoftMAC BCM43142 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It seems 14e4:4365 pattern is too generic as there are two devices: 1) 14e4:4365 1028:0016 with SoftMAC BCM43142 chipset 2) 14e4:4365 14e4:4365 with FullMAC BCM4366 chipset The later one was found in D-Link DIR-885L router and we want to let brcmfmac handle it. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/host_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 0856189c065f..cae5385cf499 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -294,7 +294,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) }, - { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, -- cgit v1.2.3 From a4f4abd037c1f8c6a834c95a46a2514d4b350888 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 26 Jan 2016 17:57:01 +0100 Subject: brcmfmac: analyze descriptors of current component only MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit So far we were looking for address descriptors without a check for crossing current component border. In case of dealing with unsupported descriptor or descriptor missing at all the code would incorrectly get data from another component. 
Consider this binary-described component from BCM4366 EROM: 4bf83b01 TAG==CI CID==0x83b 20080201 TAG==CI PORTS==0+1 WRAPPERS==0+1 18400035 TAG==ADDR SZ_SZD TYPE_SLAVE 00050000 18107085 TAG==ADDR SZ_4K TYPE_SWRAP Driver was assigning invalid base address to this core: brcmfmac: [6 ] core 0x83b:32 base 0x18109000 wrap 0x18107000 which came from totally different component defined in EROM: 43b36701 TAG==CI CID==0x367 00000201 TAG==CI PORTS==0+1 WRAPPERS==0+0 18109005 TAG==ADDR SZ_4K TYPE_SLAVE This change will also allow us to support components without wrapper address in the future. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 82e4382eb177..e434e2a52098 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -803,7 +803,14 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr, *eromaddr -= 4; return -EFAULT; } - } while (desc != DMP_DESC_ADDRESS); + } while (desc != DMP_DESC_ADDRESS && + desc != DMP_DESC_COMPONENT); + + /* stop if we crossed current component border */ + if (desc == DMP_DESC_COMPONENT) { + *eromaddr -= 4; + return 0; + } /* skip upper 32-bit address descriptor */ if (val & DMP_DESC_ADDRSIZE_GT32) -- cgit v1.2.3 From 44977b81823f4801d45d61fb9b278b4483923ae1 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 26 Jan 2016 17:57:02 +0100 Subject: brcmfmac: allow storing PMU core without wrapper address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Separated PMU core can be found in new devices and should be used for accessing PMU registers (which were routed through ChipCommon so far). This core is one of exceptions that doesn't have or need wrapper address to be still safely accessible. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index e434e2a52098..55952d43dfb5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -883,7 +883,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci) rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S; /* need core with ports */ - if (nmw + nsw == 0) + if (nmw + nsw == 0 && + id != BCMA_CORE_PMU) continue; /* try to obtain register address info */ -- cgit v1.2.3 From 9befe9195a17856f8bf5ede7a5c067bd0a281be6 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 26 Jan 2016 17:57:03 +0100 Subject: brcmfmac: read extended capabilities of ChipCommon core MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an extra bitfield with info about some present hardware. 
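As a rough sketch of how the new field is filled and later consumed (the consumer shown here, the BCMA_CC_CAP_EXT_AOB_PRESENT bit, is only used by a follow-up patch in this series):

/* read the extended capability word next to the existing one */
pub->cc_caps_ext = chip->ops->read32(chip->ctx,
				     CORE_CC_REG(base, capabilities_ext));

/* example consumer: does the chip expose an always-on-backplane PMU core? */
if (pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT)
	brcmf_dbg(INFO, "standalone PMU core present\n");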
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 3 +++ drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h | 2 ++ 2 files changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 55952d43dfb5..f4a4d00cb4a2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -1025,6 +1025,9 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip) /* get chipcommon capabilites */ pub->cc_caps = chip->ops->read32(chip->ctx, CORE_CC_REG(base, capabilities)); + pub->cc_caps_ext = chip->ops->read32(chip->ctx, + CORE_CC_REG(base, + capabilities_ext)); /* get pmu caps & rev */ if (pub->cc_caps & CC_CAP_PMU) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index f6b5feea23d2..cb9145f1786a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h @@ -27,6 +27,7 @@ * @chip: chip identifier. * @chiprev: chip revision. * @cc_caps: chipcommon core capabilities. + * @cc_caps_ext: chipcommon core extended capabilities. * @pmucaps: PMU capabilities. * @pmurev: PMU revision. * @rambase: RAM base address (only applicable for ARM CR4 chips). @@ -38,6 +39,7 @@ struct brcmf_chip { u32 chip; u32 chiprev; u32 cc_caps; + u32 cc_caps_ext; u32 pmucaps; u32 pmurev; u32 rambase; -- cgit v1.2.3 From e2b397f18cc6423c2dad87f9a633add762d869dd Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 26 Jan 2016 17:57:04 +0100 Subject: brcmfmac: access PMU registers using standalone PMU core if available MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On recent Broadcom chipsets PMU is present as separated core and it can't be accessed using ChipCommon anymore as it fails with e.g.: [ 18.198412] Unhandled fault: imprecise external abort (0x1406) at 0xb6da200f Add a new helper function that will return a proper core that should be used for accessing PMU registers. 
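Usage-wise, callers that used to compute PMU register addresses from the ChipCommon base switch to whatever core the new helper returns; a minimal sketch, with pub/chip as in brcmf_chip_setup() and the register name taken from the existing code:

struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
u32 val;

/* PMU registers may live in ChipCommon or in a standalone PMU core;
 * the helper hides that distinction from the caller. */
val = chip->ops->read32(chip->ctx,
			CORE_CC_REG(pmu->base, pmucapabilities));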
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/chip.c | 30 ++++++++++++++++++---- .../wireless/broadcom/brcm80211/brcmfmac/chip.h | 1 + .../wireless/broadcom/brcm80211/brcmfmac/sdio.c | 9 +++---- 3 files changed, 30 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index f4a4d00cb4a2..0e8f2a079907 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -1014,6 +1014,7 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip) { struct brcmf_chip *pub; struct brcmf_core_priv *cc; + struct brcmf_core *pmu; u32 base; u32 val; int ret = 0; @@ -1030,9 +1031,10 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip) capabilities_ext)); /* get pmu caps & rev */ + pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */ if (pub->cc_caps & CC_CAP_PMU) { val = chip->ops->read32(chip->ctx, - CORE_CC_REG(base, pmucapabilities)); + CORE_CC_REG(pmu->base, pmucapabilities)); pub->pmurev = val & PCAP_REV_MASK; pub->pmucaps = val; } @@ -1131,6 +1133,23 @@ struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub) return &cc->pub; } +struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub) +{ + struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub); + struct brcmf_core *pmu; + + /* See if there is separated PMU core available */ + if (cc->rev >= 35 && + pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) { + pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU); + if (pmu) + return pmu; + } + + /* Fallback to ChipCommon core for older hardware */ + return cc; +} + bool brcmf_chip_iscoreup(struct brcmf_core *pub) { struct brcmf_core_priv *core; @@ -1301,6 +1320,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) { u32 base, addr, reg, pmu_cc3_mask = ~0; struct brcmf_chip_priv *chip; + struct brcmf_core *pmu = brcmf_chip_get_pmu(pub); brcmf_dbg(TRACE, "Enter\n"); @@ -1320,9 +1340,9 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) case BRCM_CC_4335_CHIP_ID: case BRCM_CC_4339_CHIP_ID: /* read PMU chipcontrol register 3 */ - addr = CORE_CC_REG(base, chipcontrol_addr); + addr = CORE_CC_REG(pmu->base, chipcontrol_addr); chip->ops->write32(chip->ctx, addr, 3); - addr = CORE_CC_REG(base, chipcontrol_data); + addr = CORE_CC_REG(pmu->base, chipcontrol_data); reg = chip->ops->read32(chip->ctx, addr); return (reg & pmu_cc3_mask) != 0; case BRCM_CC_43430_CHIP_ID: @@ -1330,12 +1350,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) reg = chip->ops->read32(chip->ctx, addr); return reg != 0; default: - addr = CORE_CC_REG(base, pmucapabilities_ext); + addr = CORE_CC_REG(pmu->base, pmucapabilities_ext); reg = chip->ops->read32(chip->ctx, addr); if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0) return false; - addr = CORE_CC_REG(base, retention_ctl); + addr = CORE_CC_REG(pmu->base, retention_ctl); reg = chip->ops->read32(chip->ctx, addr); return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK | PMU_RCTL_LOGIC_DISABLE_MASK)) == 0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h index cb9145f1786a..dd0ec3eba6a9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h @@ -85,6 +85,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx, void brcmf_chip_detach(struct brcmf_chip *chip); struct brcmf_core 
*brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid); struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip); +struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub); bool brcmf_chip_iscoreup(struct brcmf_core *core); void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset); void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index dd6614332836..80b5d4784645 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3615,7 +3615,6 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, const struct sdiod_drive_str *str_tab = NULL; u32 str_mask; u32 str_shift; - u32 base; u32 i; u32 drivestrength_sel = 0; u32 cc_data_temp; @@ -3658,14 +3657,15 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev, } if (str_tab != NULL) { + struct brcmf_core *pmu = brcmf_chip_get_pmu(ci); + for (i = 0; str_tab[i].strength != 0; i++) { if (drivestrength >= str_tab[i].strength) { drivestrength_sel = str_tab[i].sel; break; } } - base = brcmf_chip_get_chipcommon(ci)->base; - addr = CORE_CC_REG(base, chipcontrol_addr); + addr = CORE_CC_REG(pmu->base, chipcontrol_addr); brcmf_sdiod_regwl(sdiodev, addr, 1, NULL); cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL); cc_data_temp &= ~str_mask; @@ -3835,8 +3835,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) goto fail; /* set PMUControl so a backplane reset does PMU state reload */ - reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base, - pmucontrol); + reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol); reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err); if (err) goto fail; -- cgit v1.2.3 From f66ab2a7da8ffd78ba26048f8108577ba73a1828 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 26 Jan 2016 17:57:05 +0100 Subject: brcmfmac: add support for 14e4:4365 PCI ID with BCM4366 chipset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On Broadcom ARM routers BCM4366 cards are available with 14e4:4365 ID. Unfortunately this ID was already used by Broadcom for cards with BCM43142, a totally different chipset requiring SoftMAC driver. To avoid a conflict between brcmfmac and bcma use more specific ID entry with subvendor and subdevice specified. 
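For comparison, a hedged sketch of the two forms a PCI ID table entry can take; the brcmfmac-specific wrapper macro is added in the diff below.

/* catch-all: match on vendor/device only */
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },

/* qualified: additionally require a specific subsystem vendor/device, so
 * 14e4:4365 is only claimed when the subsystem IDs also match */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365,
		 PCI_VENDOR_ID_BROADCOM, 0x4365) },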
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 0480b70e3eb8..d5f9ef470447 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1951,6 +1951,9 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = { #define BRCMF_PCIE_DEVICE(dev_id) { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } +#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \ + BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\ + subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 } static struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), @@ -1966,6 +1969,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID), + BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID), -- cgit v1.2.3 From d1e61b86a2b1b99e5dcd4d41aa23d033b83542a1 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sun, 31 Jan 2016 12:14:34 +0100 Subject: brcmfmac: treat NULL character in NVRAM as separator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Platform NVRAM (stored on a flash partition) has entries separated by a NULL (\0) char. Our parsing code switches from VALUE state to IDLE whenever it meets a NULL (\0). When that happens our IDLE handler should simply consume it and analyze whatever is placed ahead. This fixes harmless warnings spamming debugging output: [ 155.165624] brcmfmac: brcmf_nvram_handle_idle warning: ln=1:col=20: ignoring invalid character [ 155.180806] brcmfmac: brcmf_nvram_handle_idle warning: ln=1:col=44: ignoring invalid character [ 155.195971] brcmfmac: brcmf_nvram_handle_idle warning: ln=1:col=63: ignoring invalid character Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 1365c12b78fc..7269056d0044 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -93,7 +93,7 @@ static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp) c = nvp->data[nvp->pos]; if (c == '\n') return COMMENT; - if (is_whitespace(c)) + if (is_whitespace(c) || c == '\0') goto proceed; if (c == '#') return COMMENT; -- cgit v1.2.3 From 97f1a17109272b9f060cbd6d1d5be41528643000 Mon Sep 17 00:00:00 2001 From: Sjoerd Simons Date: Mon, 25 Jan 2016 11:47:29 +0100 Subject: brcmfmac: sdio: Increase the default timeouts a bit On a Radxa Rock2 board with a Ampak AP6335 (Broadcom 4339 core) it seems the card responds very quickly most of the time, unfortunately during initialisation it sometimes seems to take just a bit over 2 seconds to respond. 
This results intialization failing with message like: brcmf_c_preinit_dcmds: Retreiving cur_etheraddr failed, -52 brcmf_bus_start: failed: -52 brcmf_sdio_firmware_callback: dongle is not responding Increasing the timeout to allow for a bit more headroom allows the card to initialize reliably. A quick search online after diagnosing/fixing this showed that Google has a similar patch in their ChromeOS tree, so this doesn't seem specific to the board I'm using. Signed-off-by: Sjoerd Simons Reviewed-by: Julian Calaby Acked-by: Arend van Spriel Reviewed-by: Douglas Anderson Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 80b5d4784645..6776e45b2958 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -45,8 +45,8 @@ #include "chip.h" #include "firmware.h" -#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2000) -#define CTL_DONE_TIMEOUT msecs_to_jiffies(2000) +#define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) +#define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) #ifdef DEBUG -- cgit v1.2.3 From 48dc5fb3ba53b20418de8514700f63d88c5de3a3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 28 Jan 2016 22:58:28 +0100 Subject: hostap: avoid uninitialized variable use in hfa384x_get_rid The driver reads a value from hfa384x_from_bap(), which may fail, and then assigns the value to a local variable. gcc detects that in in the failure case, the 'rlen' variable now contains uninitialized data: In file included from ../drivers/net/wireless/intersil/hostap/hostap_pci.c:220:0: drivers/net/wireless/intersil/hostap/hostap_hw.c: In function 'hfa384x_get_rid': drivers/net/wireless/intersil/hostap/hostap_hw.c:842:5: warning: 'rec' may be used uninitialized in this function [-Wmaybe-uninitialized] if (le16_to_cpu(rec.len) == 0) { This restructures the function as suggested by Russell King, to make it more readable and get more reliable error handling, by handling each failure mode using a goto. 
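The pattern being introduced is the usual early-exit unwind: each step either succeeds or jumps to a single unlock label, so no later step can run on stale data. A minimal generic sketch with hypothetical names (not the hostap API itself):

struct mydev {
	spinlock_t lock;
};

static int read_record(struct mydev *dev, void *buf, size_t len)
{
	int res;

	spin_lock_bh(&dev->lock);

	res = setup_transfer(dev);		/* hypothetical step 1 */
	if (res)
		goto unlock;

	res = copy_from_device(dev, buf, len);	/* hypothetical step 2 */

unlock:
	spin_unlock_bh(&dev->lock);
	return res;
}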
Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/intersil/hostap/hostap_hw.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c index 6df3ee561d52..515aa3f993f3 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_hw.c +++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c @@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, spin_lock_bh(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, rid, 0); - if (!res) - res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); + if (res) + goto unlock; + + res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); + if (res) + goto unlock; if (le16_to_cpu(rec.len) == 0) { /* RID not available */ res = -ENODATA; + goto unlock; } rlen = (le16_to_cpu(rec.len) - 1) * 2; - if (!res && exact_len && rlen != len) { + if (exact_len && rlen != len) { printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " "rid=0x%04x, len=%d (expected %d)\n", dev->name, rid, rlen, len); res = -ENODATA; } - if (!res) - res = hfa384x_from_bap(dev, BAP0, buf, len); + res = hfa384x_from_bap(dev, BAP0, buf, len); +unlock: spin_unlock_bh(&local->baplock); mutex_unlock(&local->rid_bap_mtx); -- cgit v1.2.3 From 44ca509cb9769f27ad8f7d662910e31329eed46f Mon Sep 17 00:00:00 2001 From: Nachiket Kukade Date: Sun, 31 Jan 2016 23:44:46 -0800 Subject: mwifiex: fix bandwidth display problem Instead of using HT info from beacon IEs, use HT info from association response frame to update bandwidth in cfg80211_get_channel handler. Signed-off-by: Nachiket Kukade Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 4 ++-- drivers/net/wireless/marvell/mwifiex/fw.h | 2 +- drivers/net/wireless/marvell/mwifiex/join.c | 15 +++++++++++++++ drivers/net/wireless/marvell/mwifiex/main.h | 2 ++ drivers/net/wireless/marvell/mwifiex/sta_event.c | 3 +++ 5 files changed, 23 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 84615533986c..f2dce81ba36e 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -3793,8 +3793,8 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy, freq = ieee80211_channel_to_frequency(curr_bss->channel, band); chan = ieee80211_get_channel(wiphy, freq); - if (curr_bss->bcn_ht_oper) { - second_chan_offset = curr_bss->bcn_ht_oper->ht_param & + if (priv->ht_param_present) { + second_chan_offset = priv->assoc_resp_ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET; chan_type = mwifiex_sec_chan_offset_to_chan_type (second_chan_offset); diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 4af916817bcd..c134cf865291 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -1063,7 +1063,7 @@ struct ieee_types_assoc_rsp { __le16 cap_info_bitmap; __le16 status_code; __le16 a_id; - u8 ie_buffer[1]; + u8 ie_buffer[0]; } __packed; struct host_cmd_ds_802_11_associate_rsp { diff --git a/drivers/net/wireless/marvell/mwifiex/join.c b/drivers/net/wireless/marvell/mwifiex/join.c index cc09a81dbf6a..62211fca91b7 100644 --- a/drivers/net/wireless/marvell/mwifiex/join.c +++ b/drivers/net/wireless/marvell/mwifiex/join.c @@ -644,6 +644,8 @@ 
int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc; bool enable_data = true; u16 cap_info, status_code, aid; + const u8 *ie_ptr; + struct ieee80211_ht_operation *assoc_resp_ht_oper; assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params; @@ -733,6 +735,19 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv, = ((bss_desc->wmm_ie.qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0); + /* Store the bandwidth information from assoc response */ + ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer, + priv->assoc_rsp_size + - sizeof(struct ieee_types_assoc_rsp)); + if (ie_ptr) { + assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr + + sizeof(struct ieee_types_header)); + priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param; + priv->ht_param_present = true; + } else { + priv->ht_param_present = false; + } + mwifiex_dbg(priv->adapter, INFO, "info: ASSOC_RESP: curr_pkt_filter is %#x\n", priv->curr_pkt_filter); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index c755be54cc83..89a1576d0a4b 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -674,6 +674,8 @@ struct mwifiex_private { struct mwifiex_ds_mem_rw mem_rw; struct sk_buff_head bypass_txq; struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX]; + u8 assoc_resp_ht_param; + bool ht_param_present; }; diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c index fd8061c73091..070bce401151 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_event.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c @@ -92,6 +92,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code) priv->is_data_rate_auto = true; priv->data_rate = 0; + priv->assoc_resp_ht_param = 0; + priv->ht_param_present = false; + if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data) mwifiex_hist_data_reset(priv); -- cgit v1.2.3 From efdf0e393e76aa18e9c2ec0ca9b3a71ad410994b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 2 Feb 2016 13:00:05 +0300 Subject: mwifiex: fix a reversed condition The NULL test here is reversed. Fixes: 7d7f07d8c5d3 ('mwifiex: add wowlan net-detect support') Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c index fc8d8ca67453..489f7a911a83 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -2196,7 +2196,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv, pmatch = adapter->nd_info->matches[idx]; - if (!pmatch) { + if (pmatch) { memset(pmatch, 0, sizeof(*pmatch)); if (chan_band_tlv) { pmatch->n_channels = 1; -- cgit v1.2.3 From 0a7701b4defcebc7ce461355e6d9478df313b084 Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sat, 30 Jan 2016 18:01:51 +0100 Subject: libertas: fix pointer bugs for PS_MODE commands struct cmd_ds_802_11_ps_mode contains the command header and a pointer to it was initialized with data points to the body which leads to mis-interpretation of the cmd_ds_802_11_ps_mode.action member. cmd[0] contains the header, &cmd[1] points beyond that. 
cmdnode->cmdbuf is a pointer to the command buffer This piece of code was unused since power saving was not enabled. Signed-off-by: Andreas Kemnade Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/cmd.c | 4 ++-- drivers/net/wireless/marvell/libertas/cmdresp.c | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 0387a5b380c8..40467d646270 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -957,7 +957,7 @@ static void lbs_queue_cmd(struct lbs_private *priv, /* Exit_PS command needs to be queued in the header always. */ if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) { - struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf; + struct cmd_ds_802_11_ps_mode *psm = (void *)cmdnode->cmdbuf; if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) { if (priv->psstate != PS_STATE_FULL_POWER) @@ -1387,7 +1387,7 @@ int lbs_execute_next_command(struct lbs_private *priv) * PS command. Ignore it if it is not Exit_PS. * otherwise send it down immediately. */ - struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1]; + struct cmd_ds_802_11_ps_mode *psm = (void *)cmd; lbs_deb_host( "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n", diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index e5442e8956f7..701125f9b060 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -123,7 +123,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len) priv->cmd_timed_out = 0; if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) { - struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1]; + /* struct cmd_ds_802_11_ps_mode also contains + * the header + */ + struct cmd_ds_802_11_ps_mode *psmode = (void *)resp; u16 action = le16_to_cpu(psmode->action); lbs_deb_host( -- cgit v1.2.3 From fae4f9f78ab11937bc45b0f3625efe2e35aed510 Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sat, 30 Jan 2016 18:01:52 +0100 Subject: libertas: check whether bus can do more than polling If a sdio host does not support sdio irqs, polling is used instead. That has an impact on performance. Some functionality should not be enabled then. This add a variable in libertas_priv to indicate that. 
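A condensed sketch of how the flag is derived, using the same MMC capability bit as the diff below; USB hosts always have working interrupts, so they simply clear it.

/* SDIO: fall back to polling when the host cannot raise SDIO IRQs */
priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ);

/* USB: never polling */
priv->is_polling = false;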
Signed-off-by: Andreas Kemnade Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/dev.h | 1 + drivers/net/wireless/marvell/libertas/if_sdio.c | 2 +- drivers/net/wireless/marvell/libertas/if_usb.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h index 6bd1608992b0..edf710bc5e77 100644 --- a/drivers/net/wireless/marvell/libertas/dev.h +++ b/drivers/net/wireless/marvell/libertas/dev.h @@ -99,6 +99,7 @@ struct lbs_private { /* Hardware access */ void *card; bool iface_running; + u8 is_polling; /* host has to poll the card irq */ u8 fw_ready; u8 surpriseremoved; u8 setup_fw_on_resume; diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c index 68fd3a9779bd..13eae9ff8c35 100644 --- a/drivers/net/wireless/marvell/libertas/if_sdio.c +++ b/drivers/net/wireless/marvell/libertas/if_sdio.c @@ -1267,7 +1267,7 @@ static int if_sdio_probe(struct sdio_func *func, priv->reset_card = if_sdio_reset_card; priv->power_save = if_sdio_power_save; priv->power_restore = if_sdio_power_restore; - + priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ); ret = if_sdio_power_on(card); if (ret) goto err_activate_card; diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index dff08a2896a3..aba0c9995b14 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -267,6 +267,7 @@ static int if_usb_probe(struct usb_interface *intf, priv->enter_deep_sleep = NULL; priv->exit_deep_sleep = NULL; priv->reset_deep_sleep_wakeup = NULL; + priv->is_polling = false; #ifdef CONFIG_OLPC if (machine_is_olpc()) priv->reset_card = if_usb_reset_olpc_card; -- cgit v1.2.3 From 57954b94cad77c3253beca805cf493861c7d92b5 Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sat, 30 Jan 2016 18:01:53 +0100 Subject: libertas: do not confirm sleep if commands are pending If the main thread gets one PS AWAKE event and one PS SLEEP event in one iteration over event_fifo there will never be checks for commands to be processed, since psstate will always be PS_STATE_SLEEP or PS_STATE_PRE_SLEEP Signed-off-by: Andreas Kemnade Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/cmdresp.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index 701125f9b060..c95bf6dc9522 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -257,6 +257,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event) "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n"); break; } + if (!list_empty(&priv->cmdpendingq)) { + lbs_deb_cmd("EVENT: commands in queue, do not sleep\n"); + break; + } priv->psstate = PS_STATE_PRE_SLEEP; lbs_ps_confirm_sleep(priv); -- cgit v1.2.3 From fada24a5477073dd8fa84baf25ead204dc1dcb18 Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sat, 30 Jan 2016 18:01:54 +0100 Subject: libertas: go back to ps mode without commands pending Removes the old todo block and checks only whether ieee powersave mode is requested. We still have to check for being connected as this powersave mode includes logic for regularly waking up and checking for packets which only makes sense when connected. For not being connected, another mode is needed. 
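A condensed sketch of the condition that now gates re-entering power save once the command queue drains, matching the replacement code in the diff below:

/* re-enter IEEE power save only if it was requested, we are currently
 * fully awake, and we are associated (the PS logic polls the AP) */
if (priv->psmode != LBS802_11POWERMODECAM &&
    priv->psstate == PS_STATE_FULL_POWER &&
    priv->connect_status == LBS_CONNECTED)
	lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, false);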
Signed-off-by: Andreas Kemnade Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/cmd.c | 36 ++++------------------------- 1 file changed, 5 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c index 40467d646270..4ddd0e5a6b85 100644 --- a/drivers/net/wireless/marvell/libertas/cmd.c +++ b/drivers/net/wireless/marvell/libertas/cmd.c @@ -1428,40 +1428,14 @@ int lbs_execute_next_command(struct lbs_private *priv) * check if in power save mode, if yes, put the device back * to PS mode */ -#ifdef TODO - /* - * This was the old code for libertas+wext. Someone that - * understands this beast should re-code it in a sane way. - * - * I actually don't understand why this is related to WPA - * and to connection status, shouldn't powering should be - * independ of such things? - */ if ((priv->psmode != LBS802_11POWERMODECAM) && (priv->psstate == PS_STATE_FULL_POWER) && - ((priv->connect_status == LBS_CONNECTED) || - lbs_mesh_connected(priv))) { - if (priv->secinfo.WPAenabled || - priv->secinfo.WPA2enabled) { - /* check for valid WPA group keys */ - if (priv->wpa_mcast_key.len || - priv->wpa_unicast_key.len) { - lbs_deb_host( - "EXEC_NEXT_CMD: WPA enabled and GTK_SET" - " go back to PS_SLEEP"); - lbs_set_ps_mode(priv, - PS_MODE_ACTION_ENTER_PS, - false); - } - } else { - lbs_deb_host( - "EXEC_NEXT_CMD: cmdpendingq empty, " - "go back to PS_SLEEP"); - lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, - false); - } + (priv->connect_status == LBS_CONNECTED)) { + lbs_deb_host( + "EXEC_NEXT_CMD: cmdpendingq empty, go back to PS_SLEEP"); + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, + false); } -#endif } ret = 0; -- cgit v1.2.3 From 0b8802dc5f59a4517e4130dc3606eb973760928a Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sat, 30 Jan 2016 18:01:55 +0100 Subject: libertas: fix ps-mode related removal problems When the device is remove e.g. because of going to suspend mode with powersaving enabled, lbs_remove_card tries to exit powersaving state even when already woken up. That command is not processed properly in that situation, since the command processing queue is already stopped, so it waits forever for the command being processed, so disable it. Signed-off-by: Andreas Kemnade Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index 8079560f4965..b35b8bcce24c 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -1060,7 +1060,12 @@ void lbs_remove_card(struct lbs_private *priv) if (priv->psmode == LBS802_11POWERMODEMAX_PSP) { priv->psmode = LBS802_11POWERMODECAM; - lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); + /* no need to wakeup if already woken up, + * on suspend, this exit ps command is not processed + * the driver hangs + */ + if (priv->psstate != PS_STATE_FULL_POWER) + lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true); } if (priv->is_deep_sleep) { -- cgit v1.2.3 From 143e49458424b80fd004963b6ba88ed583ea62da Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Sat, 30 Jan 2016 18:01:56 +0100 Subject: libertas: add an cfg80211 interface for powersaving This patch adds an interface for handling commands like iwconfig wlanX power on/off. Such an interface formerly existed when the driver used wext. 
While performance with sdio in polling mode without using powersave mode is quite bad, powersaving mode is unusable, so do not enable it under such conditions. Signed-off-by: Andreas Kemnade Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/cfg.c | 38 +++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 86955c416b30..2eea76a340b7 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -2039,6 +2039,43 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev) +int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, + bool enabled, int timeout) +{ + struct lbs_private *priv = wiphy_priv(wiphy); + + if (!(priv->fwcapinfo & FW_CAPINFO_PS)) { + if (!enabled) + return 0; + else + return -EINVAL; + } + /* firmware does not work well with too long latency with power saving + * enabled, so do not enable it if there is only polling, no + * interrupts (like in some sdio hosts which can only + * poll for sdio irqs) + */ + if (priv->is_polling) { + if (!enabled) + return 0; + else + return -EINVAL; + } + if (!enabled) { + priv->psmode = LBS802_11POWERMODECAM; + if (priv->psstate != PS_STATE_FULL_POWER) + lbs_set_ps_mode(priv, + PS_MODE_ACTION_EXIT_PS, + true); + return 0; + } + if (priv->psmode != LBS802_11POWERMODECAM) + return 0; + priv->psmode = LBS802_11POWERMODEMAX_PSP; + if (priv->connect_status == LBS_CONNECTED) + lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, true); + return 0; +} /* * Initialization @@ -2057,6 +2094,7 @@ static struct cfg80211_ops lbs_cfg80211_ops = { .change_virtual_intf = lbs_change_intf, .join_ibss = lbs_join_ibss, .leave_ibss = lbs_leave_ibss, + .set_power_mgmt = lbs_set_power_mgmt, }; -- cgit v1.2.3 From ac2b335e7149e7dd0c8dcbb2b158a17b78c480a1 Mon Sep 17 00:00:00 2001 From: Paul Mcquade Date: Sat, 17 Oct 2015 21:04:39 +0100 Subject: net: wireless: rt2x00: Fixed Spacing issues Removed empty spaces before/after parenthesis Signed-off-by: Paul McQuade Acked-by: Helmut Schaa Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt61pci.h | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h index 1442075a8382..ab8641547a1f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h @@ -138,14 +138,14 @@ #define PAIRWISE_TA_TABLE_BASE 0x1a00 #define SHARED_KEY_ENTRY(__idx) \ - ( SHARED_KEY_TABLE_BASE + \ - ((__idx) * sizeof(struct hw_key_entry)) ) + (SHARED_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry))) #define PAIRWISE_KEY_ENTRY(__idx) \ - ( PAIRWISE_KEY_TABLE_BASE + \ - ((__idx) * sizeof(struct hw_key_entry)) ) + (PAIRWISE_KEY_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_key_entry))) #define PAIRWISE_TA_ENTRY(__idx) \ - ( PAIRWISE_TA_TABLE_BASE + \ - ((__idx) * sizeof(struct hw_pairwise_ta_entry)) ) + (PAIRWISE_TA_TABLE_BASE + \ + ((__idx) * sizeof(struct hw_pairwise_ta_entry))) struct hw_key_entry { u8 key[16]; @@ -180,7 +180,7 @@ struct hw_pairwise_ta_entry { #define HW_BEACON_BASE3 0x2f00 #define HW_BEACON_OFFSET(__index) \ - ( HW_BEACON_BASE0 + (__index * 0x0100) ) + (HW_BEACON_BASE0 + (__index * 0x0100)) /* * HOST-MCU shared memory. 
@@ -1287,9 +1287,9 @@ struct hw_pairwise_ta_entry { /* * DMA descriptor defines. */ -#define TXD_DESC_SIZE ( 16 * sizeof(__le32) ) -#define TXINFO_SIZE ( 6 * sizeof(__le32) ) -#define RXD_DESC_SIZE ( 16 * sizeof(__le32) ) +#define TXD_DESC_SIZE (16 * sizeof(__le32)) +#define TXINFO_SIZE (6 * sizeof(__le32)) +#define RXD_DESC_SIZE (16 * sizeof(__le32)) /* * TX descriptor format for TX, PRIO and Beacon Ring. -- cgit v1.2.3 From b2cc2dd8ebb885ad1b2ee4c5d35b40d98db211fa Mon Sep 17 00:00:00 2001 From: Paul Mcquade Date: Sat, 17 Oct 2015 22:06:43 +0100 Subject: net: wireless: rt2x00: Space issue Removed empty spaces before/after parenthesis Signed-off-by: Paul McQuade Acked-by: Helmut Schaa Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 3282ddb766f4..8c679f2d668d 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -107,7 +107,7 @@ * amount of bytes needed to move the data. */ #define ALIGN_SIZE(__skb, __header) \ - ( ((unsigned long)((__skb)->data + (__header))) & 3 ) + (((unsigned long)((__skb)->data + (__header))) & 3) /* * Constants for extra TX headroom for alignment purposes. @@ -128,14 +128,14 @@ #define SLOT_TIME 20 #define SHORT_SLOT_TIME 9 #define SIFS 10 -#define PIFS ( SIFS + SLOT_TIME ) -#define SHORT_PIFS ( SIFS + SHORT_SLOT_TIME ) -#define DIFS ( PIFS + SLOT_TIME ) -#define SHORT_DIFS ( SHORT_PIFS + SHORT_SLOT_TIME ) -#define EIFS ( SIFS + DIFS + \ - GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) -#define SHORT_EIFS ( SIFS + SHORT_DIFS + \ - GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) ) +#define PIFS (SIFS + SLOT_TIME) +#define SHORT_PIFS (SIFS + SHORT_SLOT_TIME) +#define DIFS (PIFS + SLOT_TIME) +#define SHORT_DIFS (SHORT_PIFS + SHORT_SLOT_TIME) +#define EIFS (SIFS + DIFS + \ + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10)) +#define SHORT_EIFS (SIFS + SHORT_DIFS + \ + GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10)) enum rt2x00_chip_intf { RT2X00_CHIP_INTF_PCI, -- cgit v1.2.3 From 5b451715e94d43fdb8f3cca67d1ec72184f294f9 Mon Sep 17 00:00:00 2001 From: Paul Mcquade Date: Sat, 17 Oct 2015 22:11:23 +0100 Subject: net: wireless: rt2x00: Space Required Space needed before open parenthesis Signed-off-by: Paul McQuade Acked-by: Helmut Schaa Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 90fdb02b55e7..25ee3cb8e982 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -629,7 +629,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name, data += sprintf(data, "register\tbase\twords\twordsize\n"); #define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \ { \ - if(debug->__name.read) \ + if (debug->__name.read) \ data += sprintf(data, __stringify(__name) \ "\t%d\t%d\t%d\n", \ debug->__name.word_base, \ @@ -699,7 +699,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ ({ \ - if(debug->__name.read) { \ + if (debug->__name.read) { \ (__intf)->__name##_off_entry = \ debugfs_create_u32(__stringify(__name) "_offset", \ S_IRUSR | S_IWUSR, \ -- 
cgit v1.2.3 From 6970cd446c25a4634f03c1adf0c12c010235bf3e Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:02 -0800 Subject: mwifiex: display right transmit packet delay drv_pkt_delay_max should be assigned non-zero value, so that packet delay can be accumulate in the right way. Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/main.h | 1 + drivers/net/wireless/marvell/mwifiex/wmm.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 89a1576d0a4b..0433977c4e2f 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -298,6 +298,7 @@ struct mwifiex_tid_tbl { #define WMM_HIGHEST_PRIORITY 7 #define HIGH_PRIO_TID 7 #define LOW_PRIO_TID 0 +#define MWIFIEX_WMM_DRV_DELAY_MAX 510 struct mwifiex_wmm_desc { struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID]; diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c index 499e5a741c62..0eb246502e1d 100644 --- a/drivers/net/wireless/marvell/mwifiex/wmm.c +++ b/drivers/net/wireless/marvell/mwifiex/wmm.c @@ -438,6 +438,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter) mwifiex_set_ba_params(priv); mwifiex_reset_11n_rx_seq_num(priv); + priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX; atomic_set(&priv->wmm.tx_pkts_queued, 0); atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID); } -- cgit v1.2.3 From af05148392f50490c662dccee6c502d9fcba33e2 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:03 -0800 Subject: mwifiex: process pcie io memory read failure case It is observed that ioread32 may fail to read pcie register in certain scenarios, this patch handles these cases. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 918e04954afe..b9fe1813cc4f 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -323,6 +323,8 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data) struct pcie_service_card *card = adapter->card; *data = ioread32(card->pci_mmap1 + reg); + if (*data == 0xffffffff) + return 0xffffffff; return 0; } -- cgit v1.2.3 From 0172404d79ec37e2c7d9dc90f6876dc314dc0fe5 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:04 -0800 Subject: mwifiex: skip firmware dump when read_regs() fails If we are not able to read registers or PCIe memory, it means PCIe device is in bad state. We will skip firmware dump in this case. 
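A minimal sketch of the guard, reusing the 0xffffffff convention the previous patch gave mwifiex_read_reg(): if even the firmware status register cannot be read, the dump is abandoned before any memory is touched.

u32 fw_status;

if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status))
	return RDWR_STATUS_FAILURE;	/* PCIe is in a bad state, skip the dump */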
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index b9fe1813cc4f..a18d944c2f31 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2366,9 +2366,13 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) { int ret, tries; u8 ctrl_data; + u32 fw_status; struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; + if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) + return RDWR_STATUS_FAILURE; + ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); if (ret) { mwifiex_dbg(adapter, ERROR, -- cgit v1.2.3 From 9a86232213b9498e0c0721fc508f760e7a089e59 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:05 -0800 Subject: mwifiex: keep original structure in decl header file The memory_type_mapping structure does not refer to any other mwifiex-specific structure. Better software design keeps it in the decl header file, which does not include other mwifiex header files. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/decl.h | 21 +++++++++++++++++++++ drivers/net/wireless/marvell/mwifiex/main.h | 20 -------------------- drivers/net/wireless/marvell/mwifiex/pcie.h | 1 + 3 files changed, 22 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index d9c15cd36f12..c744a7030866 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -270,4 +270,25 @@ struct mwifiex_11h_intf_state { bool is_11h_enabled; bool is_11h_active; } __packed; + +#define MWIFIEX_FW_DUMP_IDX 0xff +#define MWIFIEX_DRV_INFO_IDX 20 +#define FW_DUMP_MAX_NAME_LEN 8 +#define FW_DUMP_HOST_READY 0xEE +#define FW_DUMP_DONE 0xFF +#define FW_DUMP_READ_DONE 0xFE + +struct memory_type_mapping { + u8 mem_name[FW_DUMP_MAX_NAME_LEN]; + u8 *mem_ptr; + u32 mem_size; + u8 done_flag; +}; + +enum rdwr_status { + RDWR_STATUS_SUCCESS = 0, + RDWR_STATUS_FAILURE = 1, + RDWR_STATUS_DONE = 2 +}; + #endif /* !_MWIFIEX_DECL_H_ */ diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 0433977c4e2f..aea7aee46cf7 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -489,26 +489,6 @@ struct mwifiex_roc_cfg { struct ieee80211_channel chan; }; -#define MWIFIEX_FW_DUMP_IDX 0xff -#define MWIFIEX_DRV_INFO_IDX 20 -#define FW_DUMP_MAX_NAME_LEN 8 -#define FW_DUMP_HOST_READY 0xEE -#define FW_DUMP_DONE 0xFF -#define FW_DUMP_READ_DONE 0xFE - -struct memory_type_mapping { - u8 mem_name[FW_DUMP_MAX_NAME_LEN]; - u8 *mem_ptr; - u32 mem_size; - u8 done_flag; -}; - -enum rdwr_status { - RDWR_STATUS_SUCCESS = 0, - RDWR_STATUS_FAILURE = 1, - RDWR_STATUS_DONE = 2 -}; - enum mwifiex_iface_work_flags { MWIFIEX_IFACE_WORK_DEVICE_DUMP, MWIFIEX_IFACE_WORK_CARD_RESET, diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 9700ac355e55..8f190c44cb76 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++
b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -26,6 +26,7 @@ #include #include +#include "decl.h" #include "main.h" #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin" -- cgit v1.2.3 From 50632092dfda712dec2232d6ddf75e803c397c70 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:06 -0800 Subject: mwifiex: use an extensible framework for firmware dump solution This patch apply an extensible firmware dump framework, so that other chipset can be easily added as needed. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 35 ++++++++++++----------------- drivers/net/wireless/marvell/mwifiex/pcie.h | 19 ++++++++++++++++ 2 files changed, 33 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index a18d944c2f31..c94d31e1b525 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -37,17 +37,6 @@ static struct mwifiex_if_ops pcie_ops; static struct semaphore add_remove_card_sem; -static struct memory_type_mapping mem_type_mapping_tbl[] = { - {"ITCM", NULL, 0, 0xF0}, - {"DTCM", NULL, 0, 0xF1}, - {"SQRAM", NULL, 0, 0xF2}, - {"IRAM", NULL, 0, 0xF3}, - {"APU", NULL, 0, 0xF4}, - {"CIU", NULL, 0, 0xF5}, - {"ICU", NULL, 0, 0xF6}, - {"MAC", NULL, 0, 0xF7}, -}; - static int mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, size_t size, int flags) @@ -206,6 +195,8 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev, card->pcie.blksz_fw_dl = data->blksz_fw_dl; card->pcie.tx_buf_size = data->tx_buf_size; card->pcie.can_dump_fw = data->can_dump_fw; + card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl; + card->pcie.num_mem_types = data->num_mem_types; card->pcie.can_ext_scan = data->can_ext_scan; } @@ -2373,7 +2364,8 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status)) return RDWR_STATUS_FAILURE; - ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY); + ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, + reg->fw_dump_host_ready); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); @@ -2386,11 +2378,11 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag) return RDWR_STATUS_SUCCESS; if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; - if (ctrl_data != FW_DUMP_HOST_READY) { + if (ctrl_data != reg->fw_dump_host_ready) { mwifiex_dbg(adapter, WARN, "The ctrl reg was changed, re-try again!\n"); ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, - FW_DUMP_HOST_READY); + reg->fw_dump_host_ready); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); @@ -2418,8 +2410,9 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) if (!card->pcie.can_dump_fw) return; - for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { - struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; + for (idx = 0; idx < adapter->num_mem_types; idx++) { + struct memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { vfree(entry->mem_ptr); @@ -2440,8 +2433,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) /* Read the length of every memory which will dump */ for (idx = 0; idx < dump_num; idx++) { - struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; - + struct 
memory_type_mapping *entry = + &adapter->mem_type_mapping_tbl[idx]; stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); if (stat == RDWR_STATUS_FAILURE) return; @@ -2457,7 +2450,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) if (memory_size == 0) { mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n"); ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl, - FW_DUMP_READ_DONE); + creg->fw_dump_read_done); if (ret) { mwifiex_dbg(adapter, ERROR, "PCIE write err\n"); return; @@ -2762,8 +2755,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter) return -1; adapter->tx_buf_size = card->pcie.tx_buf_size; - adapter->mem_type_mapping_tbl = mem_type_mapping_tbl; - adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl); + adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl; + adapter->num_mem_types = card->pcie.num_mem_types; strcpy(adapter->fw_name, card->pcie.firmware); adapter->ext_scan = card->pcie.can_ext_scan; diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 8f190c44cb76..8bc4390f0e20 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -136,6 +136,8 @@ struct mwifiex_pcie_card_reg { u16 fw_dump_ctrl; u16 fw_dump_start; u16 fw_dump_end; + u8 fw_dump_host_ready; + u8 fw_dump_read_done; u8 msix_support; }; @@ -203,6 +205,8 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = { .fw_dump_ctrl = 0xcf4, .fw_dump_start = 0xcf8, .fw_dump_end = 0xcff, + .fw_dump_host_ready = 0xee, + .fw_dump_read_done = 0xfe, .msix_support = 0, }; @@ -238,12 +242,25 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .msix_support = 1, }; +static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = { + {"ITCM", NULL, 0, 0xF0}, + {"DTCM", NULL, 0, 0xF1}, + {"SQRAM", NULL, 0, 0xF2}, + {"IRAM", NULL, 0, 0xF3}, + {"APU", NULL, 0, 0xF4}, + {"CIU", NULL, 0, 0xF5}, + {"ICU", NULL, 0, 0xF6}, + {"MAC", NULL, 0, 0xF7}, +}; + struct mwifiex_pcie_device { const char *firmware; const struct mwifiex_pcie_card_reg *reg; u16 blksz_fw_dl; u16 tx_buf_size; bool can_dump_fw; + struct memory_type_mapping *mem_type_mapping_tbl; + u8 num_mem_types; bool can_ext_scan; }; @@ -262,6 +279,8 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = { .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, .can_dump_fw = true, + .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897, + .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897), .can_ext_scan = true, }; -- cgit v1.2.3 From 56486026c282983c390b264493a4619d997b86f1 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:07 -0800 Subject: mwifiex: dynamically increase preallocated firmware dump memory size This patch increase firmware dump memory 4K each time, until meet the demand. 
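Because vzalloc'ed memory cannot be resized in place, the growth step is copy-and-swap; a simplified sketch of the loop body in the diff below, with a hypothetical local name for the new buffer:

if (dbg_ptr >= end_ptr) {
	/* pre-allocated buffer exhausted: grow by one MWIFIEX_SIZE_4K
	 * increment and keep dumping */
	u8 *bigger = vzalloc(memory_size + MWIFIEX_SIZE_4K);

	if (!bigger)
		return;
	memcpy(bigger, entry->mem_ptr, memory_size);
	vfree(entry->mem_ptr);
	entry->mem_ptr = bigger;
	dbg_ptr = entry->mem_ptr + memory_size;
	memory_size += MWIFIEX_SIZE_4K;
	end_ptr = entry->mem_ptr + memory_size;
}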
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/decl.h | 2 ++ drivers/net/wireless/marvell/mwifiex/pcie.c | 21 ++++++++++++++++----- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index c744a7030866..a184f9fdfbcb 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -122,6 +122,8 @@ #define BLOCK_NUMBER_OFFSET 15 #define SDIO_HEADER_OFFSET 28 +#define MWIFIEX_SIZE_4K 0x4000 + enum mwifiex_bss_type { MWIFIEX_BSS_TYPE_STA = 0, MWIFIEX_BSS_TYPE_UAP = 1, diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index c94d31e1b525..a62a09b028e5 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2402,7 +2402,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; unsigned int reg, reg_start, reg_end; - u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0; + u8 *dbg_ptr, *end_ptr, *tmp_ptr, dump_num; + u8 idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; int ret; @@ -2485,11 +2486,21 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) mwifiex_read_reg_byte(adapter, reg, dbg_ptr); if (dbg_ptr < end_ptr) { dbg_ptr++; - } else { - mwifiex_dbg(adapter, ERROR, - "Allocated buf not enough\n"); - return; + continue; } + mwifiex_dbg(adapter, ERROR, + "pre-allocated buf not enough\n"); + tmp_ptr = + vzalloc(memory_size + MWIFIEX_SIZE_4K); + if (!tmp_ptr) + return; + memcpy(tmp_ptr, entry->mem_ptr, memory_size); + vfree(entry->mem_ptr); + entry->mem_ptr = tmp_ptr; + tmp_ptr = NULL; + dbg_ptr = entry->mem_ptr + memory_size; + memory_size += MWIFIEX_SIZE_4K; + end_ptr = entry->mem_ptr + memory_size; } if (stat != RDWR_STATUS_DONE) -- cgit v1.2.3 From 03f0f7cdebeb0872076817ff835ea812a62b37c6 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 2 Feb 2016 22:05:08 -0800 Subject: mwifiex: increase the priority of firmware dump message Firmware dump operation takes few seconds. Hence it's important to notify user in dmesg that firmware dump has started or completed. 
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index a62a09b028e5..14fe31671475 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2422,7 +2422,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) entry->mem_size = 0; } - mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n"); /* Read the number of the memories which will dump */ stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); @@ -2512,7 +2512,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) break; } while (true); } - mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n"); + mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n"); } static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter) -- cgit v1.2.3 From 11e70824e75f2cfbad9ae066ca5b29e1c361f19e Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 2 Feb 2016 22:05:09 -0800 Subject: mwifiex: firmware dump support for w8997 chipset Current firmware dump solution support w8897 chipset, this patch extend the exist framework with support for w8997 chipset. Trigger firmware dump using, cat /sys/kernel/debug/mwifiex/mlan0/device_dump, data can be obtain by cat /sys/class/devcoredump/devcd*/data > data.txt after that. Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/decl.h | 1 + drivers/net/wireless/marvell/mwifiex/pcie.c | 30 +++++++++++++++++++---------- drivers/net/wireless/marvell/mwifiex/pcie.h | 13 ++++++++++++- 3 files changed, 33 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/decl.h b/drivers/net/wireless/marvell/mwifiex/decl.h index a184f9fdfbcb..bec300b9c2ea 100644 --- a/drivers/net/wireless/marvell/mwifiex/decl.h +++ b/drivers/net/wireless/marvell/mwifiex/decl.h @@ -274,6 +274,7 @@ struct mwifiex_11h_intf_state { } __packed; #define MWIFIEX_FW_DUMP_IDX 0xff +#define MWIFIEX_FW_DUMP_MAX_MEMSIZE 0x160000 #define MWIFIEX_DRV_INFO_IDX 20 #define FW_DUMP_MAX_NAME_LEN 8 #define FW_DUMP_HOST_READY 0xEE diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index 14fe31671475..cc072142411a 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2402,7 +2402,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) struct pcie_service_card *card = adapter->card; const struct mwifiex_pcie_card_reg *creg = card->pcie.reg; unsigned int reg, reg_start, reg_end; - u8 *dbg_ptr, *end_ptr, *tmp_ptr, dump_num; + u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num; u8 idx, i, read_reg, doneflag = 0; enum rdwr_status stat; u32 memory_size; @@ -2430,22 +2430,32 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) return; reg = creg->fw_dump_start; - mwifiex_read_reg_byte(adapter, reg, &dump_num); + mwifiex_read_reg_byte(adapter, reg, &fw_dump_num); + + /* W8997 chipset firmware dump will be restore in single region*/ + if (fw_dump_num == 0) + dump_num = 1; + else + dump_num = fw_dump_num; /* Read the length of every memory which will dump */ for (idx = 0; idx < 
dump_num; idx++) { struct memory_type_mapping *entry = &adapter->mem_type_mapping_tbl[idx]; - stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); - if (stat == RDWR_STATUS_FAILURE) - return; - memory_size = 0; - reg = creg->fw_dump_start; - for (i = 0; i < 4; i++) { - mwifiex_read_reg_byte(adapter, reg, &read_reg); - memory_size |= (read_reg << (i * 8)); + if (fw_dump_num != 0) { + stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag); + if (stat == RDWR_STATUS_FAILURE) + return; + + reg = creg->fw_dump_start; + for (i = 0; i < 4; i++) { + mwifiex_read_reg_byte(adapter, reg, &read_reg); + memory_size |= (read_reg << (i * 8)); reg++; + } + } else { + memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE; } if (memory_size == 0) { diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h index 8bc4390f0e20..29e58ce877e3 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.h +++ b/drivers/net/wireless/marvell/mwifiex/pcie.h @@ -239,6 +239,11 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = { .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR, .pfu_enabled = 1, .sleep_cookie = 0, + .fw_dump_ctrl = 0xcf4, + .fw_dump_start = 0xcf8, + .fw_dump_end = 0xcff, + .fw_dump_host_ready = 0xcc, + .fw_dump_read_done = 0xdd, .msix_support = 1, }; @@ -253,6 +258,10 @@ static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = { {"MAC", NULL, 0, 0xF7}, }; +static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = { + {"DUMP", NULL, 0, 0xDD}, +}; + struct mwifiex_pcie_device { const char *firmware; const struct mwifiex_pcie_card_reg *reg; @@ -289,7 +298,9 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = { .reg = &mwifiex_reg_8997, .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD, .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, - .can_dump_fw = false, + .can_dump_fw = true, + .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997, + .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997), .can_ext_scan = true, }; -- cgit v1.2.3 From 1a8496ba409132afb3b407599061c34847de42d3 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 2 Feb 2016 18:09:14 +0100 Subject: vxlan: consolidate output route calculation The code for output route lookup is duplicated for ndo_start_xmit and ndo_fill_metadata_dst. Move it to a common function. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 77 +++++++++++++++++++++++++---------------------------- 1 file changed, 37 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 65439188c582..d0f7723fd776 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1847,6 +1847,27 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk return 0; } +static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, + struct sk_buff *skb, int oif, u8 tos, + __be32 daddr, __be32 *saddr) +{ + struct rtable *rt = NULL; + struct flowi4 fl4; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = oif; + fl4.flowi4_tos = RT_TOS(tos); + fl4.flowi4_mark = skb->mark; + fl4.flowi4_proto = IPPROTO_UDP; + fl4.daddr = daddr; + fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; + + rt = ip_route_output_key(vxlan->net, &fl4); + if (!IS_ERR(rt)) + *saddr = fl4.saddr; + return rt; +} + #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, @@ -1928,7 +1949,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct sock *sk; struct rtable *rt = NULL; const struct iphdr *old_iph; - struct flowi4 fl4; union vxlan_addr *dst; union vxlan_addr remote_ip; struct vxlan_metadata _md; @@ -1995,6 +2015,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } if (dst->sa.sa_family == AF_INET) { + __be32 saddr; + if (!vxlan->vn4_sock) goto drop; sk = vxlan->vn4_sock->sock->sk; @@ -2009,15 +2031,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, flags &= ~VXLAN_F_UDP_CSUM; } - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; - fl4.flowi4_tos = RT_TOS(tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = dst->sin.sin_addr.s_addr; - fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; - - rt = ip_route_output_key(vxlan->net, &fl4); + rt = vxlan_get_route(vxlan, skb, + rdst ? rdst->remote_ifindex : 0, tos, + dst->sin.sin_addr.s_addr, &saddr); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &dst->sin.sin_addr.s_addr); @@ -2049,7 +2065,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? 
: ip4_dst_hoplimit(&rt->dst); - err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr, + err = vxlan_xmit_skb(rt, sk, skb, saddr, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), @@ -2390,31 +2406,6 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, - struct ip_tunnel_info *info, - __be16 sport, __be16 dport) -{ - struct vxlan_dev *vxlan = netdev_priv(dev); - struct rtable *rt; - struct flowi4 fl4; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_tos = RT_TOS(info->key.tos); - fl4.flowi4_mark = skb->mark; - fl4.flowi4_proto = IPPROTO_UDP; - fl4.daddr = info->key.u.ipv4.dst; - - rt = ip_route_output_key(vxlan->net, &fl4); - if (IS_ERR(rt)) - return PTR_ERR(rt); - ip_rt_put(rt); - - info->key.u.ipv4.src = fl4.saddr; - info->key.tp_src = sport; - info->key.tp_dst = dport; - return 0; -} - static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -2426,9 +2417,16 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) dport = info->key.tp_dst ? : vxlan->cfg.dst_port; if (ip_tunnel_info_af(info) == AF_INET) { + struct rtable *rt; + if (!vxlan->vn4_sock) return -EINVAL; - return egress_ipv4_tun_info(dev, skb, info, sport, dport); + rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, + info->key.u.ipv4.dst, + &info->key.u.ipv4.src); + if (IS_ERR(rt)) + return PTR_ERR(rt); + ip_rt_put(rt); } else { #if IS_ENABLED(CONFIG_IPV6) struct dst_entry *ndst; @@ -2441,13 +2439,12 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); - - info->key.tp_src = sport; - info->key.tp_dst = dport; #else /* !CONFIG_IPV6 */ return -EPFNOSUPPORT; #endif } + info->key.tp_src = sport; + info->key.tp_dst = dport; return 0; } -- cgit v1.2.3 From b4ed5cad24c1072033efbffa680c84c9ba19c798 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 2 Feb 2016 18:09:15 +0100 Subject: vxlan: consolidate csum flag handling The flag for tx checksumming for tunneling over IPv4 and IPv6 is different. Decide whether to do tx checksumming in vxlan_xmit_one and pass it on as a separate flag. This will allow for tx path consolidation in the next patch. Unfortunately, gcc is not clever enough to see that udp_sum is always initialized and gives an uninitialized variable warning. Set it to false to silence the warning. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 37 ++++++++++++++++--------------------- 1 file changed, 16 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d0f7723fd776..fe3fd4808f4d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1690,12 +1690,13 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, __u8 prio, __u8 ttl, __be16 src_port, __be16 dst_port, __be32 vni, - struct vxlan_metadata *md, bool xnet, u32 vxflags) + struct vxlan_metadata *md, bool xnet, u32 vxflags, + bool udp_sum) { struct vxlanhdr *vxh; int min_headroom; int err; - bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX); + bool nocheck = !udp_sum; int type = udp_sum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; u16 hdrlen = sizeof(struct vxlanhdr); @@ -1763,8 +1764,7 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, skb_set_inner_protocol(skb, htons(ETH_P_TEB)); udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio, - ttl, src_port, dst_port, - !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX)); + ttl, src_port, dst_port, nocheck); return 0; err: dst_release(dst); @@ -1775,12 +1775,13 @@ err: static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, __be16 src_port, __be16 dst_port, __be32 vni, - struct vxlan_metadata *md, bool xnet, u32 vxflags) + struct vxlan_metadata *md, bool xnet, u32 vxflags, + bool udp_sum) { struct vxlanhdr *vxh; int min_headroom; int err; - bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM); + bool nocheck = !udp_sum; int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; u16 hdrlen = sizeof(struct vxlanhdr); @@ -1842,8 +1843,7 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk skb_set_inner_protocol(skb, htons(ETH_P_TEB)); udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df, - src_port, dst_port, xnet, - !(vxflags & VXLAN_F_UDP_CSUM)); + src_port, dst_port, xnet, nocheck); return 0; } @@ -1959,6 +1959,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, __u8 tos, ttl; int err; u32 flags = vxlan->flags; + bool udp_sum = false; info = skb_tunnel_info(skb); @@ -2007,6 +2008,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (info) { ttl = info->key.ttl; tos = info->key.tos; + udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); if (info->options_len) md = ip_tunnel_info_opts(info); @@ -2024,11 +2026,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (info) { if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) df = htons(IP_DF); - - if (info->key.tun_flags & TUNNEL_CSUM) - flags |= VXLAN_F_UDP_CSUM; - else - flags &= ~VXLAN_F_UDP_CSUM; + } else { + udp_sum = !!(flags & VXLAN_F_UDP_CSUM); } rt = vxlan_get_route(vxlan, skb, @@ -2069,7 +2068,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst->sin.sin_addr.s_addr, tos, ttl, df, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), - flags); + flags, udp_sum); if (err < 0) { /* skb is already freed. */ skb = NULL; @@ -2119,18 +2118,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } - if (info) { - if (info->key.tun_flags & TUNNEL_CSUM) - flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX; - else - flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; - } + if (!info) + udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); ttl = ttl ? : ip6_dst_hoplimit(ndst); err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, 0, ttl, src_port, dst_port, htonl(vni << 8), md, !net_eq(vxlan->net, dev_net(vxlan->dev)), - flags); + flags, udp_sum); #endif } -- cgit v1.2.3 From f491e56dba511d318f52efa4c226471bf5943e88 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 2 Feb 2016 18:09:16 +0100 Subject: vxlan: consolidate vxlan_xmit_skb and vxlan6_xmit_skb There's a lot of code duplication. Factor out the duplicate code to a new function shared between IPv4 and IPv6 xmit path. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 137 ++++++++++------------------------------------------ 1 file changed, 26 insertions(+), 111 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fe3fd4808f4d..65f52472a52c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1684,19 +1684,14 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK); } -#if IS_ENABLED(CONFIG_IPV6) -static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, - struct sk_buff *skb, - struct net_device *dev, struct in6_addr *saddr, - struct in6_addr *daddr, __u8 prio, __u8 ttl, - __be16 src_port, __be16 dst_port, __be32 vni, - struct vxlan_metadata *md, bool xnet, u32 vxflags, +static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, + int iphdr_len, __be32 vni, + struct vxlan_metadata *md, u32 vxflags, bool udp_sum) { struct vxlanhdr *vxh; int min_headroom; int err; - bool nocheck = !udp_sum; int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; u16 hdrlen = sizeof(struct vxlanhdr); @@ -1713,93 +1708,8 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk, } } - skb_scrub_packet(skb, xnet); - min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len - + VXLAN_HLEN + sizeof(struct ipv6hdr) - + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); - - /* Need space for new headers (invalidates iph ptr) */ - err = skb_cow_head(skb, min_headroom); - if (unlikely(err)) { - kfree_skb(skb); - goto err; - } - - skb = vlan_hwaccel_push_inside(skb); - if (WARN_ON(!skb)) { - err = -ENOMEM; - goto err; - } - - skb = iptunnel_handle_offloads(skb, udp_sum, type); - if (IS_ERR(skb)) { - err = -EINVAL; - goto err; - } - - vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); - vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = vni; - - if (type & SKB_GSO_TUNNEL_REMCSUM) { - u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> - VXLAN_RCO_SHIFT; - - if (skb->csum_offset == offsetof(struct udphdr, check)) - data |= VXLAN_RCO_UDP; - - vxh->vx_vni |= htonl(data); - vxh->vx_flags |= htonl(VXLAN_HF_RCO); - - if (!skb_is_gso(skb)) { - skb->ip_summed = CHECKSUM_NONE; - skb->encapsulation = 0; - } - } - - if (vxflags & VXLAN_F_GBP) - vxlan_build_gbp_hdr(vxh, vxflags, md); - - skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - - udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio, - ttl, src_port, dst_port, nocheck); - return 0; -err: - dst_release(dst); - return err; -} -#endif - -static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb, - __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, - __be16 src_port, __be16 dst_port, __be32 vni, - struct vxlan_metadata *md, bool xnet, u32 vxflags, - bool udp_sum) -{ - struct vxlanhdr *vxh; - int min_headroom; - int err; - bool nocheck = !udp_sum; - int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - u16 hdrlen = sizeof(struct vxlanhdr); - - if ((vxflags & VXLAN_F_REMCSUM_TX) && - skb->ip_summed == CHECKSUM_PARTIAL) { - int csum_start = skb_checksum_start_offset(skb); - - if (csum_start <= VXLAN_MAX_REMCSUM_START && - !(csum_start & VXLAN_RCO_SHIFT_MASK) && - (skb->csum_offset == offsetof(struct udphdr, check) || - skb->csum_offset == offsetof(struct tcphdr, check))) { - udp_sum = false; - type |= SKB_GSO_TUNNEL_REMCSUM; - } - } - - min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len - + VXLAN_HLEN + sizeof(struct iphdr) + + VXLAN_HLEN + iphdr_len + (skb_vlan_tag_present(skb) ? 
VLAN_HLEN : 0); /* Need space for new headers (invalidates iph ptr) */ @@ -1841,9 +1751,6 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk vxlan_build_gbp_hdr(vxh, vxflags, md); skb_set_inner_protocol(skb, htons(ETH_P_TEB)); - - udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df, - src_port, dst_port, xnet, nocheck); return 0; } @@ -1960,6 +1867,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, int err; u32 flags = vxlan->flags; bool udp_sum = false; + bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev)); info = skb_tunnel_info(skb); @@ -2064,16 +1972,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); - err = vxlan_xmit_skb(rt, sk, skb, saddr, - dst->sin.sin_addr.s_addr, tos, ttl, df, - src_port, dst_port, htonl(vni << 8), md, - !net_eq(vxlan->net, dev_net(vxlan->dev)), - flags, udp_sum); - if (err < 0) { - /* skb is already freed. */ - skb = NULL; - goto rt_tx_error; - } + err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), + htonl(vni << 8), md, flags, udp_sum); + if (err < 0) + goto xmit_tx_error; + + udp_tunnel_xmit_skb(rt, sk, skb, saddr, + dst->sin.sin_addr.s_addr, tos, ttl, df, + src_port, dst_port, xnet, !udp_sum); #if IS_ENABLED(CONFIG_IPV6) } else { struct dst_entry *ndst; @@ -2122,10 +2028,16 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); ttl = ttl ? : ip6_dst_hoplimit(ndst); - err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, - 0, ttl, src_port, dst_port, htonl(vni << 8), md, - !net_eq(vxlan->net, dev_net(vxlan->dev)), - flags, udp_sum); + skb_scrub_packet(skb, xnet); + err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), + htonl(vni << 8), md, flags, udp_sum); + if (err < 0) { + dst_release(ndst); + return; + } + udp_tunnel6_xmit_skb(ndst, sk, skb, dev, + &saddr, &dst->sin6.sin6_addr, + 0, ttl, src_port, dst_port, !udp_sum); #endif } @@ -2135,6 +2047,9 @@ drop: dev->stats.tx_dropped++; goto tx_free; +xmit_tx_error: + /* skb is already freed. */ + skb = NULL; rt_tx_error: ip_rt_put(rt); tx_error: -- cgit v1.2.3 From a5a773a54e59538d03fd1a20facd7214c030b1d3 Mon Sep 17 00:00:00 2001 From: Suresh Reddy Date: Wed, 3 Feb 2016 09:49:16 +0530 Subject: be2net: return error status from be_set_phys_id() be_set_phys_id() returns 0 to ethtool when the command fails in the FW. This patch fixes the set_phys_id() to return -EIO in case the FW cmd fails. Signed-off-by: Suresh Reddy Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_ethtool.c | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index a19ac441336f..2ff691636dac 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -720,29 +720,32 @@ static int be_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct be_adapter *adapter = netdev_priv(netdev); + int status = 0; switch (state) { case ETHTOOL_ID_ACTIVE: - be_cmd_get_beacon_state(adapter, adapter->hba_port_num, - &adapter->beacon_state); - return 1; /* cycle on/off once per second */ + status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num, + &adapter->beacon_state); + if (status) + return be_cmd_status(status); + return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - BEACON_STATE_ENABLED); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, BEACON_STATE_ENABLED); break; case ETHTOOL_ID_OFF: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - BEACON_STATE_DISABLED); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, BEACON_STATE_DISABLED); break; case ETHTOOL_ID_INACTIVE: - be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, - adapter->beacon_state); + status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, + 0, 0, adapter->beacon_state); } - return 0; + return be_cmd_status(status); } static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump) -- cgit v1.2.3 From fa5c867d4df275a2c11a455043848574e80a8b32 Mon Sep 17 00:00:00 2001 From: Suresh Reddy Date: Wed, 3 Feb 2016 09:49:17 +0530 Subject: be2net: check for INSUFFICIENT_PRIVILEGES error The driver currently logs the message "VF is not privileged to issue opcode" by checking only the base_status field for UNAUTHORIZED_REQUEST. Add check to look for INSUFFICIENT_PRIVILEGES in the additional status field also as not all cmds fail with that base status. Signed-off-by: Suresh Reddy Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_cmds.c | 3 ++- drivers/net/ethernet/emulex/benet/be_cmds.h | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index b63d8ad2e115..3b665f16d2aa 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -236,7 +236,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter, if (base_status != MCC_STATUS_SUCCESS && !be_skip_err_log(opcode, base_status, addl_status)) { - if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { + if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST || + addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) { dev_warn(&adapter->pdev->dev, "VF is not privileged to issue opcode %d-%d\n", opcode, subsystem); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 241819b36ca7..f260ef3329a1 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -68,7 +68,8 @@ enum mcc_addl_status { MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a, MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab, MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56, - MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57 + MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57, + MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60 }; #define CQE_BASE_STATUS_MASK 0xFFFF -- cgit v1.2.3 From 41dcdfbd0944a3ef08224a8c9ba3b2fdeae0dd86 Mon Sep 17 00:00:00 2001 From: Sriharsha Basavapatna Date: Wed, 3 Feb 2016 09:49:18 +0530 Subject: be2net: Fix be_vlan_rem_vid() to check vlan id being removed The driver decrements its vlan count without checking if it is really present in its list. This results in an invalid vlan count and impacts subsequent vlan add/rem ops. The function be_vlan_rem_vid() should be updated to fix this. Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index f99de3657ce3..09e6f2cdfc90 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1463,6 +1463,9 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid) if (lancer_chip(adapter) && vid == 0) return 0; + if (!test_bit(vid, adapter->vids)) + return 0; + clear_bit(vid, adapter->vids); adapter->vlans_added--; -- cgit v1.2.3 From ee9ad2802449c0a6811a74af2ce30500895d137b Mon Sep 17 00:00:00 2001 From: Sriharsha Basavapatna Date: Wed, 3 Feb 2016 09:49:19 +0530 Subject: be2net: SRIOV Queue distribution should factor in EQ-count of VFs The SRIOV resource distribution logic for RX/TX queue counts is not optimal when a small number of VFs are enabled. It does not take into account the VF's EQ count while computing the queue counts. Because of this, the VF gets a large number of queues, though it doesn't have sufficient EQs, resulting in wasted queue resources. And the PF gets a smaller share of queues though it has more EQs. Fix this by capping the VF queue count at its EQ count. Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 4 ++++ drivers/net/ethernet/emulex/benet/be_main.c | 15 ++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index cf837831304b..de88c30bc029 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -89,6 +89,10 @@ #define BE3_MAX_TX_QS 16 #define BE3_MAX_EVT_QS 16 #define BE3_SRIOV_MAX_EVT_QS 8 +#define SH_VF_MAX_NIC_EQS 3 /* Skyhawk VFs can have a max of 4 EQs + * and at least 1 is granted to either + * SURF/DPDK + */ #define MAX_RSS_IFACES 15 #define MAX_RX_QS 32 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 09e6f2cdfc90..62f6fbb8b0f1 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3792,18 +3792,15 @@ static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs) struct be_resources res = adapter->pool_res; u16 num_vf_qs = 1; - /* Distribute the queue resources equally among the PF and it's VFs + /* Distribute the queue resources among the PF and it's VFs * Do not distribute queue resources in multi-channel configuration. */ if (num_vfs && !be_is_mc(adapter)) { - /* If number of VFs requested is 8 less than max supported, - * assign 8 queue pairs to the PF and divide the remaining - * resources evenly among the VFs - */ - if (num_vfs < (be_max_vfs(adapter) - 8)) - num_vf_qs = (res.max_rss_qs - 8) / num_vfs; - else - num_vf_qs = res.max_rss_qs / num_vfs; + /* Divide the qpairs evenly among the VFs and the PF, capped + * at VF-EQ-count. Any remainder qpairs belong to the PF. + */ + num_vf_qs = min(SH_VF_MAX_NIC_EQS, + res.max_rss_qs / (num_vfs + 1)); /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable * interfaces per port. Provide RSS on VFs, only if number -- cgit v1.2.3 From 2e365b1b80aa98655a5582dbb9bf6cf8c0ff268c Mon Sep 17 00:00:00 2001 From: Somnath Kotur Date: Wed, 3 Feb 2016 09:49:20 +0530 Subject: be2net: Don't run ethtool self-tests for VFs The CMD_SUBSYSTEM_LOWLEVEL cmds need DEV_CFG Privilege to run which VFs don't have by default. Self-tests need to be issued only for PFs. Signed-off-by: Somnath Kotur Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_cmds.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 3b665f16d2aa..7d51d4733890 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -65,7 +65,22 @@ static struct be_cmd_priv_map cmd_priv_map[] = { CMD_SUBSYSTEM_COMMON, BE_PRIV_LNKMGMT | BE_PRIV_VHADM | BE_PRIV_DEVCFG | BE_PRIV_DEVSEC - } + }, + { + OPCODE_LOWLEVEL_HOST_DDR_DMA, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_LOWLEVEL_LOOPBACK_TEST, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, + { + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, + CMD_SUBSYSTEM_LOWLEVEL, + BE_PRIV_DEVCFG | BE_PRIV_DEVSEC + }, }; static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) @@ -3169,6 +3184,10 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, struct be_cmd_req_set_lmode *req; int status; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3214,6 +3233,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, struct be_cmd_resp_loopback_test *resp; int status; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); @@ -3260,6 +3283,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, int status; int i, j = 0; + if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA, + CMD_SUBSYSTEM_LOWLEVEL)) + return -EPERM; + spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); -- cgit v1.2.3 From 1babbad46f4aba1c242e43e27c5b36a62311b2d4 Mon Sep 17 00:00:00 2001 From: Padmanabh Ratnakar Date: Wed, 3 Feb 2016 09:49:21 +0530 Subject: be2net: Fix Lancer error recovery After error is detected, wait for adapter to move to ready state before destroying queues and cleanup of other resources. Also skip performing any cleanup for non-Lancer chips and move debug messages to correct routine. Signed-off-by: Padmanabh Ratnakar Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 51 +++++++++++++++++++---------- 1 file changed, 34 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 62f6fbb8b0f1..6eb3aba832fc 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4859,21 +4859,27 @@ static int be_resume(struct be_adapter *adapter) static int be_err_recover(struct be_adapter *adapter) { - struct device *dev = &adapter->pdev->dev; int status; + /* Error recovery is supported only Lancer as of now */ + if (!lancer_chip(adapter)) + return -EIO; + + /* Wait for adapter to reach quiescent state before + * destroying queues + */ + status = be_fw_wait_ready(adapter); + if (status) + goto err; + + be_cleanup(adapter); + status = be_resume(adapter); if (status) goto err; - dev_info(dev, "Adapter recovery successful\n"); return 0; err: - if (be_physfn(adapter)) - dev_err(dev, "Adapter recovery failed\n"); - else - dev_err(dev, "Re-trying adapter recovery\n"); - return status; } @@ -4882,21 +4888,32 @@ static void be_err_detection_task(struct work_struct *work) struct be_adapter *adapter = container_of(work, struct be_adapter, be_err_detection_work.work); - int status = 0; + struct device *dev = &adapter->pdev->dev; + int recovery_status; be_detect_error(adapter); - if (be_check_error(adapter, BE_ERROR_HW)) { - be_cleanup(adapter); - - /* As of now error recovery support is in Lancer only */ - if (lancer_chip(adapter)) - status = be_err_recover(adapter); + if (be_check_error(adapter, BE_ERROR_HW)) + recovery_status = be_err_recover(adapter); + else + goto reschedule_task; + + if (!recovery_status) { + dev_info(dev, "Adapter recovery successful\n"); + goto reschedule_task; + } else if (be_virtfn(adapter)) { + /* For VFs, check if PF have allocated resources + * every second. + */ + dev_err(dev, "Re-trying adapter recovery\n"); + goto reschedule_task; + } else { + dev_err(dev, "Adapter recovery failed\n"); } - /* Always attempt recovery on VFs */ - if (!status || be_virtfn(adapter)) - be_schedule_err_detection(adapter); + return; +reschedule_task: + be_schedule_err_detection(adapter); } static void be_log_sfp_info(struct be_adapter *adapter) -- cgit v1.2.3 From 972f37b424e65f9ef1ce143b8658d9ed50db9f42 Mon Sep 17 00:00:00 2001 From: Padmanabh Ratnakar Date: Wed, 3 Feb 2016 09:49:22 +0530 Subject: be2net: Add retry in case of error recovery failure Retry error recovery MAX_ERR_RECOVERY_RETRY_COUNT times in case of failure during error recovery. Signed-off-by: Padmanabh Ratnakar Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 5 +++++ drivers/net/ethernet/emulex/benet/be_main.c | 23 +++++++++++++++++------ 2 files changed, 22 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index de88c30bc029..515e206589cc 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -397,6 +397,10 @@ enum vf_state { #define BE_UC_PMAC_COUNT 30 #define BE_VF_UC_PMAC_COUNT 2 +#define MAX_ERR_RECOVERY_RETRY_COUNT 3 +#define ERR_DETECTION_DELAY 1000 +#define ERR_RECOVERY_RETRY_DELAY 30000 + /* Ethtool set_dump flags */ #define LANCER_INITIATE_FW_DUMP 0x1 #define LANCER_DELETE_FW_DUMP 0x2 @@ -534,6 +538,7 @@ struct be_adapter { u16 work_counter; struct delayed_work be_err_detection_work; + u8 recovery_retries; u8 err_flags; u32 flags; u32 cmd_privileges; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 6eb3aba832fc..d5286d3c9356 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4265,10 +4265,10 @@ static void be_schedule_worker(struct be_adapter *adapter) adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; } -static void be_schedule_err_detection(struct be_adapter *adapter) +static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay) { schedule_delayed_work(&adapter->be_err_detection_work, - msecs_to_jiffies(1000)); + msecs_to_jiffies(delay)); adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED; } @@ -4890,6 +4890,7 @@ static void be_err_detection_task(struct work_struct *work) be_err_detection_work.work); struct device *dev = &adapter->pdev->dev; int recovery_status; + int delay = ERR_DETECTION_DELAY; be_detect_error(adapter); @@ -4899,6 +4900,7 @@ static void be_err_detection_task(struct work_struct *work) goto reschedule_task; if (!recovery_status) { + adapter->recovery_retries = 0; dev_info(dev, "Adapter recovery successful\n"); goto reschedule_task; } else if (be_virtfn(adapter)) { @@ -4907,13 +4909,22 @@ static void be_err_detection_task(struct work_struct *work) */ dev_err(dev, "Re-trying adapter recovery\n"); goto reschedule_task; + } else if (adapter->recovery_retries++ < + MAX_ERR_RECOVERY_RETRY_COUNT) { + /* In case of another error during recovery, it takes 30 sec + * for adapter to come out of error. Retry error recovery after + * this time interval. + */ + dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n"); + delay = ERR_RECOVERY_RETRY_DELAY; + goto reschedule_task; } else { dev_err(dev, "Adapter recovery failed\n"); } return; reschedule_task: - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, delay); } static void be_log_sfp_info(struct be_adapter *adapter) @@ -5309,7 +5320,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id) be_roce_dev_add(adapter); - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); /* On Die temperature not supported for VF. 
*/ if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) { @@ -5376,7 +5387,7 @@ static int be_pci_resume(struct pci_dev *pdev) if (status) return status; - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); if (adapter->wol_en) be_setup_wol(adapter, false); @@ -5476,7 +5487,7 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; - be_schedule_err_detection(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); return; err: dev_err(&adapter->pdev->dev, "EEH resume failed\n"); -- cgit v1.2.3 From 3c0d49aaa67e1990f4c081892e9bb69070853919 Mon Sep 17 00:00:00 2001 From: Padmanabh Ratnakar Date: Wed, 3 Feb 2016 09:49:23 +0530 Subject: be2net: Fix interval calculation in interrupt moderation Interrupt moderation parameters need to be recalculated only after a time interval of 1 ms. Interval calculation is wrong when there is a rollover of jiffies. Using recommended way of interval calculation using jiffies to fix this. Signed-off-by: Padmanabh Ratnakar Signed-off-by: Sriharsha Basavapatna Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index d5286d3c9356..9c1fc9dcea25 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1917,8 +1917,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo) if (!aic->enable) return 0; - if (time_before_eq(now, aic->jiffies) || - jiffies_to_msecs(now - aic->jiffies) < 1) + if (jiffies_to_msecs(now - aic->jiffies) < 1) eqd = aic->prev_eqd; else eqd = be_get_new_eqd(eqo); -- cgit v1.2.3 From 22fae97d863679994b951799dd4bbe7afd95897b Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Tue, 2 Feb 2016 11:55:05 +0000 Subject: xen-netback: implement dynamic multicast control My recent patch to the Xen Project documents a protocol for 'dynamic multicast control' in netif.h. This extends the previous multicast control protocol to not require a shared ring reconnection to turn the feature off. Instead the backend watches the "request-multicast-control" key in xenstore and turns the feature off if the key value is written to zero. This patch adds support for dynamic multicast control in xen-netback. Signed-off-by: Paul Durrant Cc: Ian Campbell Cc: Wei Liu Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/common.h | 1 + drivers/net/xen-netback/xenbus.c | 89 ++++++++++++++++++++++++++++++++++++---- 2 files changed, 82 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 0333ab0fd926..112825200d41 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -251,6 +251,7 @@ struct xenvif { unsigned int stalled_queues; struct xenbus_watch credit_watch; + struct xenbus_watch mcast_ctrl_watch; spinlock_t lock; diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 56ebd8267386..39a303de20dd 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -327,7 +327,7 @@ static int netback_probe(struct xenbus_device *dev, goto abort_transaction; } - /* We support multicast-control. */ + /* We support dynamic multicast-control. 
*/ err = xenbus_printf(xbt, dev->nodename, "feature-multicast-control", "%d", 1); if (err) { @@ -335,6 +335,14 @@ static int netback_probe(struct xenbus_device *dev, goto abort_transaction; } + err = xenbus_printf(xbt, dev->nodename, + "feature-dynamic-multicast-control", + "%d", 1); + if (err) { + message = "writing feature-dynamic-multicast-control"; + goto abort_transaction; + } + err = xenbus_transaction_end(xbt, 0); } while (err == -EAGAIN); @@ -683,7 +691,8 @@ static void xen_net_rate_changed(struct xenbus_watch *watch, } } -static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif) +static int xen_register_credit_watch(struct xenbus_device *dev, + struct xenvif *vif) { int err = 0; char *node; @@ -708,7 +717,7 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif) return err; } -static void xen_unregister_watchers(struct xenvif *vif) +static void xen_unregister_credit_watch(struct xenvif *vif) { if (vif->credit_watch.node) { unregister_xenbus_watch(&vif->credit_watch); @@ -717,6 +726,75 @@ static void xen_unregister_watchers(struct xenvif *vif) } } +static void xen_mcast_ctrl_changed(struct xenbus_watch *watch, + const char **vec, unsigned int len) +{ + struct xenvif *vif = container_of(watch, struct xenvif, + mcast_ctrl_watch); + struct xenbus_device *dev = xenvif_to_xenbus_device(vif); + int val; + + if (xenbus_scanf(XBT_NIL, dev->otherend, + "request-multicast-control", "%d", &val) < 0) + val = 0; + vif->multicast_control = !!val; +} + +static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev, + struct xenvif *vif) +{ + int err = 0; + char *node; + unsigned maxlen = strlen(dev->otherend) + + sizeof("/request-multicast-control"); + + if (vif->mcast_ctrl_watch.node) { + pr_err_ratelimited("Watch is already registered\n"); + return -EADDRINUSE; + } + + node = kmalloc(maxlen, GFP_KERNEL); + if (!node) { + pr_err("Failed to allocate memory for watch\n"); + return -ENOMEM; + } + snprintf(node, maxlen, "%s/request-multicast-control", + dev->otherend); + vif->mcast_ctrl_watch.node = node; + vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed; + err = register_xenbus_watch(&vif->mcast_ctrl_watch); + if (err) { + pr_err("Failed to set watcher %s\n", + vif->mcast_ctrl_watch.node); + kfree(node); + vif->mcast_ctrl_watch.node = NULL; + vif->mcast_ctrl_watch.callback = NULL; + } + return err; +} + +static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif) +{ + if (vif->mcast_ctrl_watch.node) { + unregister_xenbus_watch(&vif->mcast_ctrl_watch); + kfree(vif->mcast_ctrl_watch.node); + vif->mcast_ctrl_watch.node = NULL; + } +} + +static void xen_register_watchers(struct xenbus_device *dev, + struct xenvif *vif) +{ + xen_register_credit_watch(dev, vif); + xen_register_mcast_ctrl_watch(dev, vif); +} + +static void xen_unregister_watchers(struct xenvif *vif) +{ + xen_unregister_mcast_ctrl_watch(vif); + xen_unregister_credit_watch(vif); +} + static void unregister_hotplug_status_watch(struct backend_info *be) { if (be->have_hotplug_status_watch) { @@ -1030,11 +1108,6 @@ static int read_xenbus_vif_flags(struct backend_info *be) val = 0; vif->ipv6_csum = !!val; - if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control", - "%d", &val) < 0) - val = 0; - vif->multicast_control = !!val; - return 0; } -- cgit v1.2.3 From 67eb03318bc5fe170ae832423fda7a23b0d801cf Mon Sep 17 00:00:00 2001 From: David Ahern Date: Tue, 2 Feb 2016 07:43:45 -0800 Subject: net: Add support for fill_slave_info to VRF device Allows userspace to have 
direct access to VRF table association versus looking up master device and its table. Signed-off-by: David Ahern Signed-off-by: David S. Miller --- drivers/net/vrf.c | 21 +++++++++++++++++++++ include/uapi/linux/if_link.h | 8 ++++++++ 2 files changed, 29 insertions(+) (limited to 'drivers') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 66addb7a7911..76e1fc9d8748 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -877,6 +877,24 @@ static int vrf_fillinfo(struct sk_buff *skb, return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id); } +static size_t vrf_get_slave_size(const struct net_device *bond_dev, + const struct net_device *slave_dev) +{ + return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */ +} + +static int vrf_fill_slave_info(struct sk_buff *skb, + const struct net_device *vrf_dev, + const struct net_device *slave_dev) +{ + struct net_vrf *vrf = netdev_priv(vrf_dev); + + if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id)) + return -EMSGSIZE; + + return 0; +} + static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = { [IFLA_VRF_TABLE] = { .type = NLA_U32 }, }; @@ -890,6 +908,9 @@ static struct rtnl_link_ops vrf_link_ops __read_mostly = { .validate = vrf_validate, .fill_info = vrf_fillinfo, + .get_slave_size = vrf_get_slave_size, + .fill_slave_info = vrf_fill_slave_info, + .newlink = vrf_newlink, .dellink = vrf_dellink, .setup = vrf_setup, diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index d3e90b91e07e..d452cea59020 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -405,6 +405,14 @@ enum { #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1) +enum { + IFLA_VRF_PORT_UNSPEC, + IFLA_VRF_PORT_TABLE, + __IFLA_VRF_PORT_MAX +}; + +#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1) + /* IPVLAN section */ enum { IFLA_IPVLAN_UNSPEC, -- cgit v1.2.3 From 365a10289fccbf769ad8b172c30716fac50fa278 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 2 Feb 2016 10:41:56 -0800 Subject: sunvnet: perf tracepoint invocations to trace LDC state machine Use sunvnet perf trace macros to monitor LDC message exchange state. Signed-off-by: Sowmini Varadhan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sun/sunvnet.c | 24 ++++++++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 23fa29877f5b..942a95db2061 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -17,6 +17,8 @@ #include #include #include +#define CREATE_TRACE_POINTS +#include #if IS_ENABLED(CONFIG_IPV6) #include @@ -540,6 +542,8 @@ static int vnet_walk_rx_one(struct vnet_port *port, err = vnet_rx_one(port, desc); if (err == -ECONNRESET) return err; + trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, + index, desc->hdr.ack); desc->hdr.state = VIO_DESC_DONE; err = put_rx_desc(port, dr, desc, index); if (err < 0) @@ -587,9 +591,15 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, ack_start = ack_end = vio_dring_prev(dr, start); if (send_ack) { port->napi_resume = false; + trace_vnet_tx_send_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); return vnet_send_ack(port, dr, ack_start, ack_end, VIO_DRING_STOPPED); } else { + trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); port->napi_resume = true; port->napi_stop_idx = ack_end; return 1; @@ -663,6 +673,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) /* sync for race conditions with vnet_start_xmit() and tell xmit it * is time to send a trigger. */ + trace_vnet_rx_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, end); dr->cons = vio_dring_next(dr, end); desc = vio_dring_entry(dr, dr->cons); if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { @@ -886,6 +898,9 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) int retries = 0; if (port->stop_rx) { + trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + port->stop_rx_idx, -1); err = vnet_send_ack(port, &port->vio.drings[VIO_DRIVER_RX_RING], port->stop_rx_idx, -1, @@ -908,6 +923,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) if (retries++ > VNET_MAX_RETRIES) break; } while (err == -EAGAIN); + trace_vnet_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, start, err); return err; } @@ -1414,8 +1431,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) * producer to consumer announcement that work is available to the * consumer */ - if (!port->start_cons) - goto ldc_start_done; /* previous trigger suffices */ + if (!port->start_cons) { /* previous trigger suffices */ + trace_vnet_skip_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, dr->cons); + goto ldc_start_done; + } err = __vnet_tx_trigger(port, dr->cons); if (unlikely(err < 0)) { -- cgit v1.2.3 From 16032be56c1f66770da15cb94f0eb366c37aff6e Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Wed, 3 Feb 2016 04:04:37 +0100 Subject: virtio_net: add ethtool support for set and get of settings This patch allows the user to set and retrieve speed and duplex of the virtio_net device via ethtool. Having this functionality is very helpful for simulating different environments and also enables the virtio_net device to participate in operations where proper speed and duplex are required (e.g. currently bonding lacp mode requires full duplex). Custom speed and duplex are not allowed, the user-supplied settings are validated before applying. Example: $ ethtool eth1 Settings for eth1: ... Speed: Unknown! Duplex: Unknown! 
(255) $ ethtool -s eth1 speed 1000 duplex full $ ethtool eth1 Settings for eth1: ... Speed: 1000Mb/s Duplex: Full Based on a patch by Roopa Prabhu. Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) (limited to 'drivers') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 767ab11a6e9f..c9fd52a8e6ec 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -146,6 +146,10 @@ struct virtnet_info { virtio_net_ctrl_ack ctrl_status; u8 ctrl_promisc; u8 ctrl_allmulti; + + /* Ethtool settings */ + u8 duplex; + u32 speed; }; struct padded_vnet_hdr { @@ -1376,6 +1380,58 @@ static void virtnet_get_channels(struct net_device *dev, channels->other_count = 0; } +/* Check if the user is trying to change anything besides speed/duplex */ +static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) +{ + struct ethtool_cmd diff1 = *cmd; + struct ethtool_cmd diff2 = {}; + + /* advertising and cmd are usually set, ignore port because we set it */ + ethtool_cmd_speed_set(&diff1, 0); + diff1.advertising = 0; + diff1.duplex = 0; + diff1.port = 0; + diff1.cmd = 0; + + return !memcmp(&diff1, &diff2, sizeof(diff1)); +} + +static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct virtnet_info *vi = netdev_priv(dev); + u32 speed; + + speed = ethtool_cmd_speed(cmd); + /* don't allow custom speed and duplex */ + if (!ethtool_validate_speed(speed) || + !ethtool_validate_duplex(cmd->duplex) || + !virtnet_validate_ethtool_cmd(cmd)) + return -EINVAL; + vi->speed = speed; + vi->duplex = cmd->duplex; + + return 0; +} + +static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct virtnet_info *vi = netdev_priv(dev); + + ethtool_cmd_speed_set(cmd, vi->speed); + cmd->duplex = vi->duplex; + cmd->port = PORT_OTHER; + + return 0; +} + +static void virtnet_init_settings(struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + + vi->speed = SPEED_UNKNOWN; + vi->duplex = DUPLEX_UNKNOWN; +} + static const struct ethtool_ops virtnet_ethtool_ops = { .get_drvinfo = virtnet_get_drvinfo, .get_link = ethtool_op_get_link, @@ -1383,6 +1439,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = { .set_channels = virtnet_set_channels, .get_channels = virtnet_get_channels, .get_ts_info = ethtool_op_get_ts_info, + .get_settings = virtnet_get_settings, + .set_settings = virtnet_set_settings, }; #define MIN_MTU 68 @@ -1855,6 +1913,8 @@ static int virtnet_probe(struct virtio_device *vdev) netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); + virtnet_init_settings(dev); + err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed\n"); -- cgit v1.2.3 From d66bd9050bba153c35736e53b8a3ddbff328adc5 Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Wed, 3 Feb 2016 02:02:32 +0000 Subject: bonding: trivial: style fixes remove some redudant brackets, use sizeof(*) instead of sizeof(struct x). Signed-off-by: Zhang Shengju Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7c9eb6704a7d..705cb0198faa 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -928,11 +928,10 @@ void bond_select_active_slave(struct bonding *bond) if (!rv) return; - if (netif_carrier_ok(bond->dev)) { + if (netif_carrier_ok(bond->dev)) netdev_info(bond->dev, "first active interface up!\n"); - } else { + else netdev_info(bond->dev, "now running without any active interface!\n"); - } } } @@ -1178,9 +1177,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) } } - if (bond_should_deliver_exact_match(skb, slave, bond)) { + if (bond_should_deliver_exact_match(skb, slave, bond)) return RX_HANDLER_EXACT; - } skb->dev = bond->dev; @@ -1241,7 +1239,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond) { struct slave *slave = NULL; - slave = kzalloc(sizeof(struct slave), GFP_KERNEL); + slave = kzalloc(sizeof(*slave), GFP_KERNEL); if (!slave) return NULL; -- cgit v1.2.3 From 5ee14e6d336f1daacf5ba73e831029c5ab7ae329 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Wed, 3 Feb 2016 13:17:01 +0100 Subject: bonding: 3ad: apply ad_actor settings changes immediately Currently the bonding allows to set ad_actor_system and prio while the bond device is down, but these are actually applied only if there aren't any slaves yet (applied to bond device when first slave shows up, and to slaves at 3ad bind time). After this patch changes are applied immediately and the new values can be used/seen after the bond's upped so it's not necessary anymore to release all and enslave again to see the changes. CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek Signed-off-by: Nikolay Aleksandrov Signed-off-by: Jay Vosburgh Signed-off-by: David S. Miller --- drivers/net/bonding/bond_3ad.c | 40 +++++++++++++++++++++++++++++++++++--- drivers/net/bonding/bond_options.c | 4 ++++ include/net/bond_3ad.h | 1 + 3 files changed, 42 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 4cbb8b27a891..ee94056dbb2e 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -357,6 +357,14 @@ static u8 __get_duplex(struct port *port) return retval; } +static void __ad_actor_update_port(struct port *port) +{ + const struct bonding *bond = bond_get_bond_by_slave(port->slave); + + port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; + port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority; +} + /* Conversions */ /** @@ -1963,9 +1971,7 @@ void bond_3ad_bind_slave(struct slave *slave) port->actor_admin_port_key = bond->params.ad_user_port_key << 6; ad_update_actor_keys(port, false); /* actor system is the bond's system */ - port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr; - port->actor_system_priority = - BOND_AD_INFO(bond).system.sys_priority; + __ad_actor_update_port(port); /* tx timer(to verify that no more than MAX_TX_IN_SECOND * lacpdu's are sent in one second) */ @@ -2147,6 +2153,34 @@ out: spin_unlock_bh(&bond->mode_lock); } +/** + * bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports + * @bond: bonding struct to work on + * + * If an ad_actor setting gets changed we need to update the individual port + * settings so the bond device will use the new values when it gets upped. 
+ */ +void bond_3ad_update_ad_actor_settings(struct bonding *bond) +{ + struct list_head *iter; + struct slave *slave; + + ASSERT_RTNL(); + + BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio; + if (is_zero_ether_addr(bond->params.ad_actor_system)) + BOND_AD_INFO(bond).system.sys_mac_addr = + *((struct mac_addr *)bond->dev->dev_addr); + else + BOND_AD_INFO(bond).system.sys_mac_addr = + *((struct mac_addr *)bond->params.ad_actor_system); + + spin_lock_bh(&bond->mode_lock); + bond_for_each_slave(bond, slave, iter) + __ad_actor_update_port(&(SLAVE_AD_INFO(slave)->port)); + spin_unlock_bh(&bond->mode_lock); +} + /** * bond_3ad_state_machine_handler - handle state machines timeout * @bond: bonding struct to work on diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 55e93b6b6d21..ed0bdae64f5e 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1392,6 +1392,8 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond, newval->value); bond->params.ad_actor_sys_prio = newval->value; + bond_3ad_update_ad_actor_settings(bond); + return 0; } @@ -1418,6 +1420,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond, netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac); ether_addr_copy(bond->params.ad_actor_system, mac); + bond_3ad_update_ad_actor_settings(bond); + return 0; err: diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h index f1fbc3b11962..f358ad5e4214 100644 --- a/include/net/bond_3ad.h +++ b/include/net/bond_3ad.h @@ -306,5 +306,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); +void bond_3ad_update_ad_actor_settings(struct bonding *bond); #endif /* _NET_BOND_3AD_H */ -- cgit v1.2.3 From 568b3ce7a8efdc23ad2f0b9ca8d82899cf68972d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 10 Feb 2016 01:37:44 +0300 Subject: ravb: factor out register bit twiddling code The driver has often repeated pattern of reading a register, AND'ing and/or OR'ing some bits and writing the value back. Factor the pattern out into ravb_modify() -- this saves 260 bytes of code with ARM gcc 4.7.3. While at it, update Cogent Embedded's copyrights. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb.h | 4 +- drivers/net/ethernet/renesas/ravb_main.c | 68 +++++++++++++------------------- drivers/net/ethernet/renesas/ravb_ptp.c | 25 +++--------- 3 files changed, 36 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 9fbe92ac225b..b2160d1b9c71 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -2,7 +2,7 @@ * * Copyright (C) 2014-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. 
* * Based on the SuperH Ethernet driver * @@ -837,6 +837,8 @@ static inline void ravb_write(struct net_device *ndev, u32 data, iowrite32(data, priv->addr + reg); } +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set); int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value); irqreturn_t ravb_ptp_interrupt(struct net_device *ndev); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index ac43ed914fcf..c936682aae68 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -2,7 +2,7 @@ * * Copyright (C) 2014-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. * * Based on the SuperH Ethernet driver * @@ -42,6 +42,12 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, + u32 set) +{ + ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg); +} + int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value) { int i; @@ -59,8 +65,7 @@ static int ravb_config(struct net_device *ndev) int error; /* Set config mode */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG, - CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Check if the operating mode is changed to the config mode */ error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG); if (error) @@ -72,13 +77,8 @@ static int ravb_config(struct net_device *ndev) static void ravb_set_duplex(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - u32 ecmr = ravb_read(ndev, ECMR); - if (priv->duplex) /* Full */ - ecmr |= ECMR_DM; - else /* Half */ - ecmr &= ~ECMR_DM; - ravb_write(ndev, ecmr, ECMR); + ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0); } static void ravb_set_rate(struct net_device *ndev) @@ -131,13 +131,8 @@ static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set) { struct ravb_private *priv = container_of(ctrl, struct ravb_private, mdiobb); - u32 pir = ravb_read(priv->ndev, PIR); - if (set) - pir |= mask; - else - pir &= ~mask; - ravb_write(priv->ndev, pir, PIR); + ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); } /* MDC pin control */ @@ -393,9 +388,9 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_ring_format(ndev, RAVB_NC); #if defined(__LITTLE_ENDIAN) - ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC); + ravb_modify(ndev, CCC, CCC_BOC, 0); #else - ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC); + ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC); #endif /* Set AVB RX */ @@ -418,8 +413,7 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); /* Setting the control will start the AVB-DMAC process. 
*/ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION, - CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); return 0; } @@ -493,7 +487,7 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) break; } } - ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR); + ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR); } } @@ -613,13 +607,13 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) static void ravb_rcv_snd_disable(struct net_device *ndev) { /* Disable TX and RX */ - ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR); + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); } static void ravb_rcv_snd_enable(struct net_device *ndev) { /* Enable TX and RX */ - ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR); + ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } /* function for waiting dma process finished */ @@ -812,8 +806,8 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0); - ravb_write(ndev, ravb_read(ndev, TIC) | mask, TIC); + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -852,8 +846,7 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_set_rate(ndev); } if (!priv->link) { - ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF, - ECMR); + ravb_modify(ndev, ECMR, ECMR_TXF, 0); new_state = true; priv->link = phydev->link; if (priv->no_avb_link) @@ -1393,7 +1386,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc--; desc->die_dt = DT_FSTART; - ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); + ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q); priv->cur_tx[q] += NUM_TX_DESC; if (priv->cur_tx[q] - priv->dirty_tx[q] > @@ -1468,15 +1461,10 @@ static void ravb_set_rx_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; - u32 ecmr; spin_lock_irqsave(&priv->lock, flags); - ecmr = ravb_read(ndev, ECMR); - if (ndev->flags & IFF_PROMISC) - ecmr |= ECMR_PRM; - else - ecmr &= ~ECMR_PRM; - ravb_write(ndev, ecmr, ECMR); + ravb_modify(ndev, ECMR, ECMR_PRM, + ndev->flags & IFF_PROMISC ? 
ECMR_PRM : 0); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); } @@ -1804,14 +1792,12 @@ static int ravb_probe(struct platform_device *pdev) /* Set AVB config mode */ if (chip_id == RCAR_GEN2) { - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | - CCC_OPC_CONFIG, CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | - CCC_CSEL_HPB, CCC); + ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); } else { - ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | - CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); + ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG | + CCC_GAC | CCC_CSEL_HPB); } /* Set CSEL value */ @@ -1824,7 +1810,7 @@ static int ravb_probe(struct platform_device *pdev) goto out_release; /* Request GTI loading */ - ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR); + ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); /* Allocate descriptor base address table */ priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 7a8ce920c49e..57992ccc4657 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -2,7 +2,7 @@ * * Copyright (C) 2013-2015 Renesas Electronics Corporation * Copyright (C) 2015 Renesas Solutions Corp. - * Copyright (C) 2015 Cogent Embedded, Inc. + * Copyright (C) 2015-2016 Cogent Embedded, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,7 +21,7 @@ static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request) if (error) return error; - ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR); + ravb_modify(ndev, GCCR, request, request); return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); } @@ -185,7 +185,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, ptp.info); struct net_device *ndev = priv->ndev; unsigned long flags; - u32 gic; if (req->index) return -EINVAL; @@ -195,12 +194,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - gic = ravb_read(ndev, GIC); - if (on) - gic |= GIC_PTCE; - else - gic &= ~GIC_PTCE; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -216,7 +210,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, struct ravb_ptp_perout *perout; unsigned long flags; int error = 0; - u32 gic; if (req->index) return -EINVAL; @@ -248,9 +241,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - gic = ravb_read(ndev, GIC); - gic |= GIC_PTME; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); } } else { spin_lock_irqsave(&priv->lock, flags); @@ -259,9 +250,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - gic = ravb_read(ndev, GIC); - gic &= ~GIC_PTME; - ravb_write(ndev, gic, GIC); + ravb_modify(ndev, GIC, GIC_PTME, 0); } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -331,7 +320,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) { struct ravb_private *priv = netdev_priv(ndev); unsigned long flags; - u32 gccr; priv->ptp.info = ravb_ptp_info; @@ -340,8 +328,7 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev) spin_lock_irqsave(&priv->lock, flags); ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ); - gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS; - ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR); + ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); -- cgit v1.2.3 From b2b14d2f57df89fc2d57099ce13c5b513061d3a1 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 10 Feb 2016 01:38:28 +0300 Subject: sh_eth: factor out register bit twiddling code The driver has often repeated pattern of reading a register, AND'ing and/or OR'ing some bits and writing the value back. Factor the pattern out into sh_eth_modify() -- this saves 84 bytes of code with ARM gcc 4.7.3. While at it, update Cogent Embedded's copyright. Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 53 ++++++++++++++++------------------- 1 file changed, 24 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index dfa9e59c9442..0a150b228914 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3,7 +3,7 @@ * Copyright (C) 2014 Renesas Electronics Corporation * Copyright (C) 2006-2012 Nobuhiro Iwamatsu * Copyright (C) 2008-2014 Renesas Solutions Corp. - * Copyright (C) 2013-2014 Cogent Embedded, Inc. + * Copyright (C) 2013-2016 Cogent Embedded, Inc. * Copyright (C) 2014 Codethink Limited * * This program is free software; you can redistribute it and/or modify it @@ -428,6 +428,13 @@ static u32 sh_eth_read(struct net_device *ndev, int enum_index) return ioread32(mdp->addr + offset); } +static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear, + u32 set) +{ + sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set, + enum_index); +} + static bool sh_eth_is_gether(struct sh_eth_private *mdp) { return mdp->reg_offset == sh_eth_offset_gigabit; @@ -467,10 +474,7 @@ static void sh_eth_set_duplex(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); - if (mdp->duplex) /* Full */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); - else /* Half */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? 
ECMR_DM : 0); } static void sh_eth_chip_reset(struct net_device *ndev) @@ -583,10 +587,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_ELB, 0); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB); break; default: break; @@ -649,10 +653,10 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev) switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RTM, 0); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM); break; default: break; @@ -924,8 +928,7 @@ static int sh_eth_reset(struct net_device *ndev) if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) { sh_eth_write(ndev, EDSR_ENALL, EDSR); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER); ret = sh_eth_check_reset(ndev); if (ret) @@ -949,11 +952,9 @@ static int sh_eth_reset(struct net_device *ndev) if (mdp->cd->select_mii) sh_eth_select_mii(ndev); } else { - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER); mdelay(3); - sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, - EDMR); + sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0); } return ret; @@ -1285,7 +1286,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); - sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); + sh_eth_modify(ndev, EESR, 0, 0); if (start) { mdp->irq_enabled = true; sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); @@ -1532,15 +1533,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) static void sh_eth_rcv_snd_disable(struct net_device *ndev) { /* disable tx and rx */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & - ~(ECMR_RE | ECMR_TE), ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0); } static void sh_eth_rcv_snd_enable(struct net_device *ndev) { /* enable tx and rx */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | - (ECMR_RE | ECMR_TE), ECMR); + sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE); } /* error control function */ @@ -1569,13 +1568,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status) sh_eth_rcv_snd_disable(ndev); } else { /* Link Up */ - sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & - ~DMAC_M_ECI, EESIPR); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0); /* clear int */ - sh_eth_write(ndev, sh_eth_read(ndev, ECSR), - ECSR); - sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | - DMAC_M_ECI, EESIPR); + sh_eth_modify(ndev, ECSR, 0, 0); + sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, + DMAC_M_ECI); /* enable tx and rx */ sh_eth_rcv_snd_enable(ndev); } @@ -1765,9 +1762,7 @@ static void sh_eth_adjust_link(struct net_device *ndev) mdp->cd->set_rate(ndev); } if (!mdp->link) { - sh_eth_write(ndev, - sh_eth_read(ndev, ECMR) & ~ECMR_TXF, - ECMR); + sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); new_state = 1; mdp->link = phydev->link; if (mdp->cd->no_psr || mdp->no_ether_link) -- cgit v1.2.3 From aaa5672052cb0f18a5ab0ab19e9ef71c50e018a8 Mon Sep 17 00:00:00 2001 From: Jean Sacren Date: Tue, 9 Feb 2016 20:47:17 -0700 
Subject: sxgbe: remove unused code Remove the unused code of sxgbe_xpcs. Reported-by: Julia Lawall Suggested-by: David S. Miller Signed-off-by: Jean Sacren Cc: Byungho An Cc: Girish K S Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1601191918470.2531@hadrien Signed-off-by: David S. Miller --- drivers/net/ethernet/samsung/sxgbe/Makefile | 2 +- drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c | 91 ------------------------- drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h | 38 ----------- 3 files changed, 1 insertion(+), 130 deletions(-) delete mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c delete mode 100644 drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h (limited to 'drivers') diff --git a/drivers/net/ethernet/samsung/sxgbe/Makefile b/drivers/net/ethernet/samsung/sxgbe/Makefile index dcc80b9d4370..31e968561d5c 100644 --- a/drivers/net/ethernet/samsung/sxgbe/Makefile +++ b/drivers/net/ethernet/samsung/sxgbe/Makefile @@ -1,4 +1,4 @@ obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \ sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o sxgbe_mdio.o \ - sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y) + sxgbe_ethtool.o $(samsung-sxgbe-y) diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c deleted file mode 100644 index 51c32194ba88..000000000000 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c +++ /dev/null @@ -1,91 +0,0 @@ -/* 10G controller driver for Samsung SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Siva Reddy Kallam - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#include -#include -#include -#include -#include "sxgbe_common.h" -#include "sxgbe_xpcs.h" - -static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg) -{ - u32 value; - struct sxgbe_priv_data *priv = netdev_priv(ndev); - - value = readl(priv->ioaddr + XPCS_OFFSET + reg); - - return value; -} - -static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data) -{ - struct sxgbe_priv_data *priv = netdev_priv(ndev); - - writel(data, priv->ioaddr + XPCS_OFFSET + reg); - - return 0; -} - -int sxgbe_xpcs_init(struct net_device *ndev) -{ - u32 value; - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - /* 10G XAUI mode */ - sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13)); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE); - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); - - return 0; -} - -int sxgbe_xpcs_init_1G(struct net_device *ndev) -{ - int value; - - /* 10GBASE-X PCS (1G) mode */ - sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X); - sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE); - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13)); - - value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6)); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13)); - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11)); - - do { - value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS); - } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE); - - value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1); - sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11)); - - /* Auto Negotiation cluase 37 enable */ - value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL); - sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12)); - - return 0; -} diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h deleted file mode 100644 index 6b26a50724d3..000000000000 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h +++ /dev/null @@ -1,38 +0,0 @@ -/* 10G controller driver for Samsung SoCs - * - * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * Author: Byungho An - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ -#ifndef __SXGBE_XPCS_H__ -#define __SXGBE_XPCS_H__ - -/* XPCS Registers */ -#define XPCS_OFFSET 0x1A060000 -#define SR_PCS_MMD_CONTROL1 0x030000 -#define SR_PCS_CONTROL2 0x030007 -#define VR_PCS_MMD_XAUI_MODE_CONTROL 0x038004 -#define VR_PCS_MMD_DIGITAL_STATUS 0x038010 -#define SR_MII_MMD_CONTROL 0x1F0000 -#define SR_MII_MMD_AN_ADV 0x1F0004 -#define SR_MII_MMD_AN_LINK_PARTNER_BA 0x1F0005 -#define VR_MII_MMD_AN_CONTROL 0x1F8001 -#define VR_MII_MMD_AN_INT_STATUS 0x1F8002 - -#define XPCS_QSEQ_STATE_STABLE 0x10 -#define XPCS_QSEQ_STATE_MPLLOFF 0x1c -#define XPCS_TYPE_SEL_R 0x00 -#define XPCS_TYPE_SEL_X 0x01 -#define XPCS_TYPE_SEL_W 0x02 -#define XPCS_XAUI_MODE 0x00 -#define XPCS_RXAUI_MODE 0x01 - -int sxgbe_xpcs_init(struct net_device *ndev); -int sxgbe_xpcs_init_1G(struct net_device *ndev); - -#endif /* __SXGBE_XPCS_H__ */ -- cgit v1.2.3 From 622190669403fb7a7557a8b618e57c0a3f703056 Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Wed, 10 Feb 2016 22:45:53 +0530 Subject: be2net: Request RSS capability of Rx interface depending on number of Rx rings Currently we request RSS capability even if a single Rx ring is created. As a result in few cases we unnecessarily consume an RSS capable interface which is a limited resource in the chip. This patch enables RSS on an interface only if more than one Rx ring is created. Signed-off-by: Ajit Khaparde Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_cmds.c | 19 +++------- drivers/net/ethernet/emulex/benet/be_main.c | 58 +++++++++++++++++++++-------- 2 files changed, 48 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 7d51d4733890..33cbbde218be 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -1513,34 +1513,25 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, return status; } -/* Uses MCCQ */ +/* Uses MCCQ if available else MBOX */ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) { - struct be_mcc_wrb *wrb; + struct be_mcc_wrb wrb = {0}; struct be_cmd_req_if_destroy *req; int status; if (interface_id == -1) return 0; - spin_lock_bh(&adapter->mcc_lock); - - wrb = wrb_from_mccq(adapter); - if (!wrb) { - status = -EBUSY; - goto err; - } - req = embedded_payload(wrb); + req = embedded_payload(&wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_DESTROY, - sizeof(*req), wrb, NULL); + sizeof(*req), &wrb, NULL); req->hdr.domain = domain; req->interface_id = cpu_to_le32(interface_id); - status = be_mcc_notify_wait(adapter); -err: - spin_unlock_bh(&adapter->mcc_lock); + status = be_cmd_notify_wait(adapter, &wrb); return status; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 9c1fc9dcea25..e4fc87455e00 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -3365,6 +3365,7 @@ done: static void be_rx_qs_destroy(struct be_adapter *adapter) { + struct rss_info *rss = &adapter->rss_info; struct be_queue_info *q; struct be_rx_obj *rxo; int i; @@ -3391,6 +3392,12 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) } be_queue_free(adapter, q); } + + if (rss->rss_flags) { + rss->rss_flags = RSS_ENABLE_NONE; + be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, + 128, rss->rss_hkey); + } } static void be_disable_if_filters(struct be_adapter 
*adapter) @@ -3511,20 +3518,21 @@ static int be_rx_qs_create(struct be_adapter *adapter) if (!BEx_chip(adapter)) rss->rss_flags |= RSS_ENABLE_UDP_IPV4 | RSS_ENABLE_UDP_IPV6; + + netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); + rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, + RSS_INDIR_TABLE_LEN, rss_key); + if (rc) { + rss->rss_flags = RSS_ENABLE_NONE; + return rc; + } + + memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); } else { /* Disable RSS, if only default RX Q is created */ rss->rss_flags = RSS_ENABLE_NONE; } - netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN); - rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags, - RSS_INDIR_TABLE_LEN, rss_key); - if (rc) { - rss->rss_flags = RSS_ENABLE_NONE; - return rc; - } - - memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN); /* Post 1 less than RXQ-len to avoid head being equal to tail, * which is a queue empty condition @@ -4306,6 +4314,23 @@ err: return status; } +static int be_if_create(struct be_adapter *adapter) +{ + u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; + u32 cap_flags = be_if_cap_flags(adapter); + int status; + + if (adapter->cfg_num_qs == 1) + cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS); + + en_flags &= cap_flags; + /* will enable all the needed filter flags in be_open() */ + status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, + &adapter->if_handle, 0); + + return status; +} + int be_update_queues(struct be_adapter *adapter) { struct net_device *netdev = adapter->netdev; @@ -4323,6 +4348,9 @@ int be_update_queues(struct be_adapter *adapter) be_msix_disable(adapter); be_clear_queues(adapter); + status = be_cmd_if_destroy(adapter, adapter->if_handle, 0); + if (status) + return status; if (!msix_enabled(adapter)) { status = be_msix_enable(adapter); @@ -4330,6 +4358,10 @@ int be_update_queues(struct be_adapter *adapter) return status; } + status = be_if_create(adapter); + if (status) + return status; + status = be_setup_queues(adapter); if (status) return status; @@ -4394,7 +4426,6 @@ static int be_func_init(struct be_adapter *adapter) static int be_setup(struct be_adapter *adapter) { struct device *dev = &adapter->pdev->dev; - u32 en_flags; int status; status = be_func_init(adapter); @@ -4427,10 +4458,7 @@ static int be_setup(struct be_adapter *adapter) goto err; /* will enable all the needed filter flags in be_open() */ - en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS; - en_flags = en_flags & be_if_cap_flags(adapter); - status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags, - &adapter->if_handle, 0); + status = be_if_create(adapter); if (status) goto err; @@ -4803,7 +4831,7 @@ static void be_netdev_init(struct net_device *netdev) netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX; - if (be_multi_rxq(adapter)) + if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) netdev->hw_features |= NETIF_F_RXHASH; netdev->features |= netdev->hw_features | -- cgit v1.2.3 From 51d1f98a136fcb16fa3304c2386d9a5202335a63 Mon Sep 17 00:00:00 2001 From: Ajit Khaparde Date: Wed, 10 Feb 2016 22:45:54 +0530 Subject: be2net: Interpret and log new data that's added to the port misconfigure async event From FW version 11.0 onwards, the PORT_MISCONFIG event generated by the FW will carry more information about the event in the "data_word1" and "data_word2" fields. This patch adds support in the driver to parse the new information and log it accordingly.
This patch also changes some of the messages that are being logged currently. Signed-off-by: Suresh Reddy Signed-off-by: Venkat Duvvuru Signed-off-by: Ajit Khaparde Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be.h | 5 +- drivers/net/ethernet/emulex/benet/be_cmds.c | 82 +++++++++++++++++++++-------- drivers/net/ethernet/emulex/benet/be_cmds.h | 45 +++++++++++++++- drivers/net/ethernet/emulex/benet/be_main.c | 11 ++-- 4 files changed, 116 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 515e206589cc..ab24f84060c6 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -115,6 +115,8 @@ #define RSS_INDIR_TABLE_LEN 128 #define RSS_HASH_KEY_LEN 40 +#define BE_UNKNOWN_PHY_STATE 0xFF + struct be_dma_mem { void *va; dma_addr_t dma; @@ -390,7 +392,7 @@ enum vf_state { #define BE_FLAGS_QNQ_ASYNC_EVT_RCVD BIT(7) #define BE_FLAGS_VXLAN_OFFLOADS BIT(8) #define BE_FLAGS_SETUP_DONE BIT(9) -#define BE_FLAGS_EVT_INCOMPATIBLE_SFP BIT(10) +#define BE_FLAGS_PHY_MISCONFIGURED BIT(10) #define BE_FLAGS_ERR_DETECTION_SCHEDULED BIT(11) #define BE_FLAGS_OS2BMC BIT(12) @@ -603,6 +605,7 @@ struct be_adapter { u32 bmc_filt_mask; u32 fat_dump_len; u16 serial_num[CNTL_SERIAL_NUM_WORDS]; + u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */ }; #define be_physfn(adapter) (!adapter->virtfn) diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 33cbbde218be..66fa21426fe2 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -19,19 +19,25 @@ #include "be.h" #include "be_cmds.h" -static char *be_port_misconfig_evt_desc[] = { - "A valid SFP module detected", - "Optics faulted/ incorrectly installed/ not installed.", - "Optics of two types installed.", - "Incompatible optics.", - "Unknown port SFP status" +char *be_misconfig_evt_port_state[] = { + "Physical Link is functional", + "Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.", + "Optics of two types installed – Remove one optic or install matching pair of optics.", + "Incompatible optics – Replace with compatible optics for card to function.", + "Unqualified optics – Replace with Avago optics for Warranty and Technical Support.", + "Uncertified optics – Replace with Avago-certified optics to enable link operation." }; -static char *be_port_misconfig_remedy_desc[] = { - "", - "Reseat optics. 
If issue not resolved, replace", - "Remove one optic or install matching pair of optics", - "Replace with compatible optics for card to function", +static char *be_port_misconfig_evt_severity[] = { + "KERN_WARN", + "KERN_INFO", + "KERN_ERR", + "KERN_WARN" +}; + +static char *phy_state_oper_desc[] = { + "Link is non-operational", + "Link is operational", "" }; @@ -297,22 +303,56 @@ static void be_async_port_misconfig_event_process(struct be_adapter *adapter, { struct be_async_event_misconfig_port *evt = (struct be_async_event_misconfig_port *)compl; - u32 sfp_mismatch_evt = le32_to_cpu(evt->event_data_word1); + u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1); + u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2); + u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE; struct device *dev = &adapter->pdev->dev; - u8 port_misconfig_evt; + u8 msg_severity = DEFAULT_MSG_SEVERITY; + u8 phy_state_info; + u8 new_phy_state; + + new_phy_state = + (sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff; + + if (new_phy_state == adapter->phy_state) + return; + + adapter->phy_state = new_phy_state; - port_misconfig_evt = - ((sfp_mismatch_evt >> (adapter->hba_port_num * 8)) & 0xff); + /* for older fw that doesn't populate link effect data */ + if (!sfp_misconfig_evt_word2) + goto log_message; + phy_state_info = + (sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff; + + if (phy_state_info & PHY_STATE_INFO_VALID) { + msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1; + + if (be_phy_unqualified(new_phy_state)) + phy_oper_state = (phy_state_info & PHY_STATE_OPER); + } + +log_message: /* Log an error message that would allow a user to determine * whether the SFPs have an issue */ - dev_info(dev, "Port %c: %s %s", adapter->port_name, - be_port_misconfig_evt_desc[port_misconfig_evt], - be_port_misconfig_remedy_desc[port_misconfig_evt]); - - if (port_misconfig_evt == INCOMPATIBLE_SFP) - adapter->flags |= BE_FLAGS_EVT_INCOMPATIBLE_SFP; + if (be_phy_state_unknown(new_phy_state)) + dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, + "Port %c: Unrecognized Optics state: 0x%x. %s", + adapter->port_name, + new_phy_state, + phy_state_oper_desc[phy_oper_state]); + else + dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, + "Port %c: %s %s", + adapter->port_name, + be_misconfig_evt_port_state[new_phy_state], + phy_state_oper_desc[phy_oper_state]); + + /* Log Vendor name and part no. 
if a misconfigured SFP is detected */ + if (be_phy_misconfigured(new_phy_state)) + adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED; } /* Grp5 CoS Priority evt */ diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index f260ef3329a1..0b9f741f31af 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -176,10 +176,53 @@ struct be_async_event_qnq { u32 flags; } __packed; -#define INCOMPATIBLE_SFP 0x3 +enum { + BE_PHY_FUNCTIONAL = 0, + BE_PHY_NOT_PRESENT = 1, + BE_PHY_DIFF_MEDIA = 2, + BE_PHY_INCOMPATIBLE = 3, + BE_PHY_UNQUALIFIED = 4, + BE_PHY_UNCERTIFIED = 5 +}; + +#define PHY_STATE_MSG_SEVERITY 0x6 +#define PHY_STATE_OPER 0x1 +#define PHY_STATE_INFO_VALID 0x80 +#define PHY_STATE_OPER_MSG_NONE 0x2 +#define DEFAULT_MSG_SEVERITY 0x1 + +#define be_phy_state_unknown(phy_state) (phy_state > BE_PHY_UNCERTIFIED) +#define be_phy_unqualified(phy_state) \ + (phy_state == BE_PHY_UNQUALIFIED || \ + phy_state == BE_PHY_UNCERTIFIED) +#define be_phy_misconfigured(phy_state) \ + (phy_state == BE_PHY_INCOMPATIBLE || \ + phy_state == BE_PHY_UNQUALIFIED || \ + phy_state == BE_PHY_UNCERTIFIED) + +extern char *be_misconfig_evt_port_state[]; + /* async event indicating misconfigured port */ struct be_async_event_misconfig_port { + /* DATA_WORD1: + * phy state of port 0: bits 7 - 0 + * phy state of port 1: bits 15 - 8 + * phy state of port 2: bits 23 - 16 + * phy state of port 3: bits 31 - 24 + */ u32 event_data_word1; + /* DATA_WORD2: + * phy state info of port 0: bits 7 - 0 + * phy state info of port 1: bits 15 - 8 + * phy state info of port 2: bits 23 - 16 + * phy state info of port 3: bits 31 - 24 + * + * PHY STATE INFO: + * Link operability :bit 0 + * Message severity :bit 2 - 1 + * Rsvd :bits 6 - 3 + * phy state info valid :bit 7 + */ u32 event_data_word2; u32 rsvd0; u32 flags; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index e4fc87455e00..4ecd8c2beaf5 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4089,6 +4089,7 @@ static void be_setup_init(struct be_adapter *adapter) adapter->if_handle = -1; adapter->be3_native = false; adapter->if_flags = 0; + adapter->phy_state = BE_UNKNOWN_PHY_STATE; if (be_physfn(adapter)) adapter->cmd_privileges = MAX_PRIVILEGES; else @@ -4961,11 +4962,13 @@ static void be_log_sfp_info(struct be_adapter *adapter) status = be_cmd_query_sfp_info(adapter); if (!status) { dev_err(&adapter->pdev->dev, - "Unqualified SFP+ detected on %c from %s part no: %s", - adapter->port_name, adapter->phy.vendor_name, + "Port %c: %s Vendor: %s part no: %s", + adapter->port_name, + be_misconfig_evt_port_state[adapter->phy_state], + adapter->phy.vendor_name, adapter->phy.vendor_pn); } - adapter->flags &= ~BE_FLAGS_EVT_INCOMPATIBLE_SFP; + adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED; } static void be_worker(struct work_struct *work) @@ -5009,7 +5012,7 @@ static void be_worker(struct work_struct *work) if (!skyhawk_chip(adapter)) be_eqd_update(adapter, false); - if (adapter->flags & BE_FLAGS_EVT_INCOMPATIBLE_SFP) + if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED) be_log_sfp_info(adapter); reschedule: -- cgit v1.2.3 From 7f20cd252185702f951009e0a56778f870d50ca6 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 4 Feb 2016 17:42:28 +0100 Subject: bonding: 3ad: allow to set ad_actor settings while the bond is up No need to require the bond down while changing these settings, 
the change will be reflected immediately and the 3ad mode will sort itself out. For faster convergence set port->ntt to true in order to generate new LACPDUs immediately. CC: Jay Vosburgh CC: Veaceslav Falico CC: Andy Gospodarek Signed-off-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- drivers/net/bonding/bond_3ad.c | 8 ++++++-- drivers/net/bonding/bond_options.c | 3 +-- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index ee94056dbb2e..b9304a295f86 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2176,8 +2176,12 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) *((struct mac_addr *)bond->params.ad_actor_system); spin_lock_bh(&bond->mode_lock); - bond_for_each_slave(bond, slave, iter) - __ad_actor_update_port(&(SLAVE_AD_INFO(slave)->port)); + bond_for_each_slave(bond, slave, iter) { + struct port *port = &(SLAVE_AD_INFO(slave))->port; + + __ad_actor_update_port(port); + port->ntt = true; + } spin_unlock_bh(&bond->mode_lock); } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index ed0bdae64f5e..577e57cad1dc 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -402,7 +402,6 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .id = BOND_OPT_AD_ACTOR_SYS_PRIO, .name = "ad_actor_sys_prio", .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)), - .flags = BOND_OPTFLAG_IFDOWN, .values = bond_ad_actor_sys_prio_tbl, .set = bond_option_ad_actor_sys_prio_set, }, @@ -410,7 +409,7 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = { .id = BOND_OPT_AD_ACTOR_SYSTEM, .name = "ad_actor_system", .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)), - .flags = BOND_OPTFLAG_RAWVAL | BOND_OPTFLAG_IFDOWN, + .flags = BOND_OPTFLAG_RAWVAL, .set = bond_option_ad_actor_system_set, }, [BOND_OPT_AD_USER_PORT_KEY] = { -- cgit v1.2.3 From 55cd48c821def1d3192f166b4ce48254cf2e5a69 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:43 -0700 Subject: net: fec: stop the "rcv is not +last, " error messages Setting the FTRL register will stop the fec from trying to use multiple receive buffers. Signed-off-by: Troy Kisky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec.h | 1 + drivers/net/ethernet/freescale/fec_main.c | 1 + 2 files changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 2106d72c91dc..cc9677ae70ab 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -64,6 +64,7 @@ #define FEC_R_FIFO_RSEM 0x194 /* Receive FIFO section empty threshold */ #define FEC_R_FIFO_RAEM 0x198 /* Receive FIFO almost empty threshold */ #define FEC_R_FIFO_RAFL 0x19c /* Receive FIFO almost full threshold */ +#define FEC_FTRL 0x1b0 /* Frame truncation receive length*/ #define FEC_RACC 0x1c4 /* Receive Accelerator function */ #define FEC_RCMR_1 0x1c8 /* Receive classification match ring 1 */ #define FEC_RCMR_2 0x1cc /* Receive classification match ring 2 */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 41c81f6ec630..3e5b24ae537e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -988,6 +988,7 @@ fec_restart(struct net_device *ndev) val &= ~FEC_RACC_OPTIONS; writel(val, fep->hwp + FEC_RACC); } + writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); #endif /* -- cgit v1.2.3 From 095098e194d1357a509c1d8e9a421bf825dddd2a Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:44 -0700 Subject: net: fec: fix rx error counts On an overrun, the other flags are not valid, so don't check them. Also, don't pass bad frames up the stack. Signed-off-by: Troy Kisky Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 36 +++++++++++++------------------ 1 file changed, 15 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3e5b24ae537e..162fa59e3eae 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1408,37 +1408,31 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) break; pkt_received++; - /* Since we have allocated space to hold a complete frame, - * the last indicator should be set. - */ - if ((status & BD_ENET_RX_LAST) == 0) - netdev_err(ndev, "rcv is not +last\n"); - writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ + status ^= BD_ENET_RX_LAST; if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | - BD_ENET_RX_CR | BD_ENET_RX_OV)) { + BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | + BD_ENET_RX_CL)) { ndev->stats.rx_errors++; - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { + if (status & BD_ENET_RX_OV) { + /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + goto rx_processing_done; + } + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH + | BD_ENET_RX_LAST)) { /* Frame too long or too short. */ ndev->stats.rx_length_errors++; + if (status & BD_ENET_RX_LAST) + netdev_err(ndev, "rcv is not +last\n"); } - if (status & BD_ENET_RX_NO) /* Frame alignment */ - ndev->stats.rx_frame_errors++; if (status & BD_ENET_RX_CR) /* CRC Error */ ndev->stats.rx_crc_errors++; - if (status & BD_ENET_RX_OV) /* FIFO overrun */ - ndev->stats.rx_fifo_errors++; - } - - /* Report late collisions as a frame error. - * On this error, the BD is closed, but we don't know what we - * have in the buffer. So, just drop this frame on the floor. - */ - if (status & BD_ENET_RX_CL) { - ndev->stats.rx_errors++; - ndev->stats.rx_frame_errors++; + /* Report late collisions as a frame error. 
*/ + if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) + ndev->stats.rx_frame_errors++; goto rx_processing_done; } -- cgit v1.2.3 From 93c595f7b8c5fe98bd78bab5270a49c40a69f221 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:45 -0700 Subject: net: fec: fix fec_enet_get_free_txdesc_num When first initialized, cur_tx points to the 1st entry in the queue, and dirty_tx points to the last. At this point, fec_enet_get_free_txdesc_num will return tx_ring_size -2. If tx_ring_size -2 entries are now queued, then fec_enet_get_free_txdesc_num should return 0, but it returns tx_ring_size instead. Signed-off-by: Troy Kisky Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 162fa59e3eae..adbddfdb5474 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -296,7 +296,7 @@ static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, entries = ((const char *)txq->dirty_tx - (const char *)txq->cur_tx) / fep->bufdesc_size - 1; - return entries > 0 ? entries : entries + txq->tx_ring_size; + return entries >= 0 ? entries : entries + txq->tx_ring_size; } static void swap_buffer(void *bufaddr, int len) -- cgit v1.2.3 From 7355f2760620b385d1335f9cf73fbb1b640c4726 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:46 -0700 Subject: net: fec: add struct bufdesc_prop This reduces code and gains speed. Signed-off-by: Troy Kisky Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec.h | 29 ++- drivers/net/ethernet/freescale/fec_main.c | 288 ++++++++++++------------------ 2 files changed, 132 insertions(+), 185 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index cc9677ae70ab..53ec04fbdc6d 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -448,33 +448,34 @@ struct bufdesc_ex { /* Controller supports RACC register */ #define FEC_QUIRK_HAS_RACC (1 << 12) +struct bufdesc_prop { + int qid; + /* Address of Rx and Tx buffers */ + struct bufdesc *base; + struct bufdesc *last; + struct bufdesc *cur; + dma_addr_t dma; + unsigned short ring_size; + unsigned char dsize; + unsigned char dsize_log2; +}; + struct fec_enet_priv_tx_q { - int index; + struct bufdesc_prop bd; unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff *tx_skbuff[TX_RING_SIZE]; - dma_addr_t bd_dma; - struct bufdesc *tx_bd_base; - uint tx_ring_size; - unsigned short tx_stop_threshold; unsigned short tx_wake_threshold; - struct bufdesc *cur_tx; struct bufdesc *dirty_tx; char *tso_hdrs; dma_addr_t tso_hdrs_dma; }; struct fec_enet_priv_rx_q { - int index; + struct bufdesc_prop bd; struct sk_buff *rx_skbuff[RX_RING_SIZE]; - - dma_addr_t bd_dma; - struct bufdesc *rx_bd_base; - uint rx_ring_size; - - struct bufdesc *cur_rx; }; /* The FEC buffer descriptors track the ring buffers. 
The rx_bd_base and @@ -514,8 +515,6 @@ struct fec_enet_private { unsigned long work_ts; unsigned long work_mdio; - unsigned short bufdesc_size; - struct platform_device *pdev; int dev_id; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index adbddfdb5474..b039288f5c98 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -217,86 +217,38 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); #define IS_TSO_HEADER(txq, addr) \ ((addr >= txq->tso_hdrs_dma) && \ - (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) + (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) static int mii_cnt; -static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, - struct fec_enet_private *fep, - int queue_id) -{ - struct bufdesc *new_bd = bdp + 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; - struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= txq->tx_bd_base) { - base = txq->tx_bd_base; - ring_size = txq->tx_ring_size; - ex_base = (struct bufdesc_ex *)txq->tx_bd_base; - } else { - base = rxq->rx_bd_base; - ring_size = rxq->rx_ring_size; - ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; - } - - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? - ex_base : ex_new_bd); - else - return (new_bd >= (base + ring_size)) ? - base : new_bd; -} - -static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, - struct fec_enet_private *fep, - int queue_id) -{ - struct bufdesc *new_bd = bdp - 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; - struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; - struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= txq->tx_bd_base) { - base = txq->tx_bd_base; - ring_size = txq->tx_ring_size; - ex_base = (struct bufdesc_ex *)txq->tx_bd_base; - } else { - base = rxq->rx_bd_base; - ring_size = rxq->rx_ring_size; - ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; - } +static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp >= bd->last) ? bd->base + : (struct bufdesc *)(((unsigned)bdp) + bd->dsize); +} - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd < ex_base) ? - (ex_new_bd + ring_size) : ex_new_bd); - else - return (new_bd < base) ? (new_bd + ring_size) : new_bd; +static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, + struct bufdesc_prop *bd) +{ + return (bdp <= bd->base) ? 
bd->last + : (struct bufdesc *)(((unsigned)bdp) - bd->dsize); } -static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, - struct fec_enet_private *fep) +static int fec_enet_get_bd_index(struct bufdesc *bdp, + struct bufdesc_prop *bd) { - return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; + return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; } -static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, - struct fec_enet_priv_tx_q *txq) +static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) { int entries; - entries = ((const char *)txq->dirty_tx - - (const char *)txq->cur_tx) / fep->bufdesc_size - 1; + entries = (((const char *)txq->dirty_tx - + (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; - return entries >= 0 ? entries : entries + txq->tx_ring_size; + return entries >= 0 ? entries : entries + txq->bd.ring_size; } static void swap_buffer(void *bufaddr, int len) @@ -329,20 +281,20 @@ static void fec_dump(struct net_device *ndev) pr_info("Nr SC addr len SKB\n"); txq = fep->tx_queue[0]; - bdp = txq->tx_bd_base; + bdp = txq->bd.base; do { pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", index, - bdp == txq->cur_tx ? 'S' : ' ', + bdp == txq->bd.cur ? 'S' : ' ', bdp == txq->dirty_tx ? 'H' : ' ', fec16_to_cpu(bdp->cbd_sc), fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), txq->tx_skbuff[index]); - bdp = fec_enet_get_nextdesc(bdp, fep, 0); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); index++; - } while (bdp != txq->tx_bd_base); + } while (bdp != txq->bd.base); } static inline bool is_ipv4_pkt(struct sk_buff *skb) @@ -373,7 +325,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct bufdesc *bdp = txq->cur_tx; + struct bufdesc *bdp = txq->bd.cur; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; unsigned short queue = skb_get_queue_mapping(skb); @@ -388,7 +340,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, for (frag = 0; frag < nr_frags; frag++) { this_frag = &skb_shinfo(skb)->frags[frag]; - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); ebdp = (struct bufdesc_ex *)bdp; status = fec16_to_cpu(bdp->cbd_sc); @@ -418,7 +370,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], bufaddr, frag_len); @@ -444,9 +396,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, return bdp; dma_mapping_error: - bdp = txq->cur_tx; + bdp = txq->bd.cur; for (i = 0; i < frag; i++) { - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); } @@ -468,7 +420,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, unsigned int index; int entries_free; - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free < MAX_SKB_FRAGS + 1) { dev_kfree_skb_any(skb); if (net_ratelimit()) @@ -483,7 +435,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, } /* Fill in a Tx ring entry */ - bdp = 
txq->cur_tx; + bdp = txq->bd.cur; last_bdp = bdp; status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; @@ -493,7 +445,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, buflen = skb_headlen(skb); queue = skb_get_queue_mapping(skb); - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { memcpy(txq->tx_bounce[index], skb->data, buflen); @@ -544,7 +496,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, ebdp->cbd_esc = cpu_to_fec32(estatus); } - index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); + index = fec_enet_get_bd_index(last_bdp, &txq->bd); /* Save skb pointer */ txq->tx_skbuff[index] = skb; @@ -558,15 +510,15 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_sc = cpu_to_fec16(status); /* If this was the last BD in the ring, start at the beginning again. */ - bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); + bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); skb_tx_timestamp(skb); /* Make sure the update to bdp and tx_skbuff are performed before - * cur_tx. + * txq->bd.cur. */ wmb(); - txq->cur_tx = bdp; + txq->bd.cur = bdp; /* Trigger transmission start */ writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); @@ -697,13 +649,13 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; - struct bufdesc *bdp = txq->cur_tx; + struct bufdesc *bdp = txq->bd.cur; unsigned short queue = skb_get_queue_mapping(skb); struct tso_t tso; unsigned int index = 0; int ret; - if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { + if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "NOT enough BD for TSO!\n"); @@ -723,7 +675,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, while (total_len > 0) { char *hdr; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); total_len -= data_left; @@ -738,9 +690,8 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, int size; size = min_t(int, tso.size, data_left); - bdp = fec_enet_get_nextdesc(bdp, fep, queue); - index = fec_enet_get_bd_index(txq->tx_bd_base, - bdp, fep); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); + index = fec_enet_get_bd_index(bdp, &txq->bd); ret = fec_enet_txq_put_data_tso(txq, skb, ndev, bdp, index, tso.data, size, @@ -753,14 +704,14 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, tso_build_data(skb, &tso, size); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Save skb pointer */ txq->tx_skbuff[index] = skb; skb_tx_timestamp(skb); - txq->cur_tx = bdp; + txq->bd.cur = bdp; /* Trigger transmission start */ if (!(fep->quirks & FEC_QUIRK_ERR007885) || @@ -798,7 +749,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) if (ret) return ret; - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free <= txq->tx_stop_threshold) netif_tx_stop_queue(nq); @@ -819,32 +770,32 @@ static void fec_enet_bd_init(struct net_device *dev) for (q = 0; q < fep->num_rx_queues; q++) { /* Initialize 
the receive buffer descriptors. */ rxq = fep->rx_queue[q]; - bdp = rxq->rx_bd_base; + bdp = rxq->bd.base; - for (i = 0; i < rxq->rx_ring_size; i++) { + for (i = 0; i < rxq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); else bdp->cbd_sc = cpu_to_fec16(0); - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); - rxq->cur_rx = rxq->rx_bd_base; + rxq->bd.cur = rxq->bd.base; } for (q = 0; q < fep->num_tx_queues; q++) { /* ...and the same for transmit */ txq = fep->tx_queue[q]; - bdp = txq->tx_bd_base; - txq->cur_tx = bdp; + bdp = txq->bd.base; + txq->bd.cur = bdp; - for (i = 0; i < txq->tx_ring_size; i++) { + for (i = 0; i < txq->bd.ring_size; i++) { /* Initialize the BD for every fragment in the page. */ bdp->cbd_sc = cpu_to_fec16(0); if (txq->tx_skbuff[i]) { @@ -852,11 +803,11 @@ static void fec_enet_bd_init(struct net_device *dev) txq->tx_skbuff[i] = NULL; } bdp->cbd_bufaddr = cpu_to_fec32(0); - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep, q); + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); txq->dirty_tx = bdp; } @@ -880,7 +831,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_rx_queues; i++) { rxq = fep->rx_queue[i]; - writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); + writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); /* enable DMA1/2 */ @@ -891,7 +842,7 @@ static void fec_enet_enable_ring(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) { txq = fep->tx_queue[i]; - writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); + writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); /* enable DMA1/2 */ if (i) @@ -909,7 +860,7 @@ static void fec_enet_reset_skb(struct net_device *ndev) for (i = 0; i < fep->num_tx_queues; i++) { txq = fep->tx_queue[i]; - for (j = 0; j < txq->tx_ring_size; j++) { + for (j = 0; j < txq->bd.ring_size; j++) { if (txq->tx_skbuff[j]) { dev_kfree_skb_any(txq->tx_skbuff[j]); txq->tx_skbuff[j] = NULL; @@ -1222,16 +1173,16 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) bdp = txq->dirty_tx; /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); - while (bdp != READ_ONCE(txq->cur_tx)) { - /* Order the load of cur_tx and cbd_sc */ + while (bdp != READ_ONCE(txq->bd.cur)) { + /* Order the load of bd.cur and cbd_sc */ rmb(); status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); if (status & BD_ENET_TX_READY) break; - index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &txq->bd); skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; @@ -1242,7 +1193,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) DMA_TO_DEVICE); bdp->cbd_bufaddr = cpu_to_fec32(0); if (!skb) { - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); continue; } @@ -1291,19 +1242,19 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) txq->dirty_tx = bdp; /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = 
fec_enet_get_nextdesc(bdp, &txq->bd); /* Since we have freed up a buffer, the ring is no longer full */ if (netif_queue_stopped(ndev)) { - entries_free = fec_enet_get_free_txdesc_num(fep, txq); + entries_free = fec_enet_get_free_txdesc_num(txq); if (entries_free >= txq->tx_wake_threshold) netif_tx_wake_queue(nq); } } /* ERR006538: Keep the transmitter going */ - if (bdp != txq->cur_tx && + if (bdp != txq->bd.cur && readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); } @@ -1367,7 +1318,7 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, return true; } -/* During a receive, the cur_rx points to the current incoming buffer. +/* During a receive, the bd_rx.cur points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet. @@ -1400,7 +1351,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. */ - bdp = rxq->cur_rx; + bdp = rxq->bd.cur; while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { @@ -1441,7 +1392,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) pkt_len = fec16_to_cpu(bdp->cbd_datlen); ndev->stats.rx_bytes += pkt_len; - index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); + index = fec_enet_get_bd_index(bdp, &rxq->bd); skb = rxq->rx_skbuff[index]; /* The packet length includes FCS, but we don't want to @@ -1541,7 +1492,7 @@ rx_processing_done: } /* Update BD pointer to next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); /* Doing this here will keep the FEC running while we process * incoming frames. 
On a heavily loaded network, we should be @@ -1549,7 +1500,7 @@ rx_processing_done: */ writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); } - rxq->cur_rx = bdp; + rxq->bd.cur = bdp; return pkt_received; } @@ -2658,8 +2609,8 @@ static void fec_enet_free_buffers(struct net_device *ndev) for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { skb = rxq->rx_skbuff[i]; rxq->rx_skbuff[i] = NULL; if (skb) { @@ -2669,14 +2620,14 @@ static void fec_enet_free_buffers(struct net_device *ndev) DMA_FROM_DEVICE); dev_kfree_skb(skb); } - bdp = fec_enet_get_nextdesc(bdp, fep, q); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } } for (q = 0; q < fep->num_tx_queues; q++) { txq = fep->tx_queue[q]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { kfree(txq->tx_bounce[i]); txq->tx_bounce[i] = NULL; skb = txq->tx_skbuff[i]; @@ -2696,7 +2647,7 @@ static void fec_enet_free_queue(struct net_device *ndev) if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { txq = fep->tx_queue[i]; dma_free_coherent(NULL, - txq->tx_ring_size * TSO_HEADER_SIZE, + txq->bd.ring_size * TSO_HEADER_SIZE, txq->tso_hdrs, txq->tso_hdrs_dma); } @@ -2722,15 +2673,15 @@ static int fec_enet_alloc_queue(struct net_device *ndev) } fep->tx_queue[i] = txq; - txq->tx_ring_size = TX_RING_SIZE; - fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; + txq->bd.ring_size = TX_RING_SIZE; + fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; txq->tx_wake_threshold = - (txq->tx_ring_size - txq->tx_stop_threshold) / 2; + (txq->bd.ring_size - txq->tx_stop_threshold) / 2; txq->tso_hdrs = dma_alloc_coherent(NULL, - txq->tx_ring_size * TSO_HEADER_SIZE, + txq->bd.ring_size * TSO_HEADER_SIZE, &txq->tso_hdrs_dma, GFP_KERNEL); if (!txq->tso_hdrs) { @@ -2747,8 +2698,8 @@ static int fec_enet_alloc_queue(struct net_device *ndev) goto alloc_failed; } - fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; - fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; + fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; + fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; } return ret; @@ -2767,8 +2718,8 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) struct fec_enet_priv_rx_q *rxq; rxq = fep->rx_queue[queue]; - bdp = rxq->rx_bd_base; - for (i = 0; i < rxq->rx_ring_size; i++) { + bdp = rxq->bd.base; + for (i = 0; i < rxq->bd.ring_size; i++) { skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); if (!skb) goto err_alloc; @@ -2786,11 +2737,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); } /* Set the last buffer to wrap. 
*/ - bdp = fec_enet_get_prevdesc(bdp, fep, queue); + bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; @@ -2808,8 +2759,8 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) struct fec_enet_priv_tx_q *txq; txq = fep->tx_queue[queue]; - bdp = txq->tx_bd_base; - for (i = 0; i < txq->tx_ring_size; i++) { + bdp = txq->bd.base; + for (i = 0; i < txq->bd.ring_size; i++) { txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); if (!txq->tx_bounce[i]) goto err_alloc; @@ -2822,11 +2773,11 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); } - bdp = fec_enet_get_nextdesc(bdp, fep, queue); + bdp = fec_enet_get_nextdesc(bdp, &txq->bd); } /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep, queue); + bdp = fec_enet_get_prevdesc(bdp, &txq->bd); bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; @@ -3117,13 +3068,15 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - struct fec_enet_priv_tx_q *txq; - struct fec_enet_priv_rx_q *rxq; struct bufdesc *cbd_base; dma_addr_t bd_dma; int bd_size; unsigned int i; + unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : + sizeof(struct bufdesc); + unsigned dsize_log2 = __fls(dsize); + WARN_ON(dsize != (1 << dsize_log2)); #if defined(CONFIG_ARM) fep->rx_align = 0xf; fep->tx_align = 0xf; @@ -3134,12 +3087,7 @@ static int fec_enet_init(struct net_device *ndev) fec_enet_alloc_queue(ndev); - if (fep->bufdesc_ex) - fep->bufdesc_size = sizeof(struct bufdesc_ex); - else - fep->bufdesc_size = sizeof(struct bufdesc); - bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * - fep->bufdesc_size; + bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; /* Allocate memory for buffer descriptors. */ cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, @@ -3157,33 +3105,33 @@ static int fec_enet_init(struct net_device *ndev) /* Set receive and transmit descriptor base. 
*/ for (i = 0; i < fep->num_rx_queues; i++) { - rxq = fep->rx_queue[i]; - rxq->index = i; - rxq->rx_bd_base = (struct bufdesc *)cbd_base; - rxq->bd_dma = bd_dma; - if (fep->bufdesc_ex) { - bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; - cbd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); - } else { - bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; - cbd_base += rxq->rx_ring_size; - } + struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; + unsigned size = dsize * rxq->bd.ring_size; + + rxq->bd.qid = i; + rxq->bd.base = cbd_base; + rxq->bd.cur = cbd_base; + rxq->bd.dma = bd_dma; + rxq->bd.dsize = dsize; + rxq->bd.dsize_log2 = dsize_log2; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); } for (i = 0; i < fep->num_tx_queues; i++) { - txq = fep->tx_queue[i]; - txq->index = i; - txq->tx_bd_base = (struct bufdesc *)cbd_base; - txq->bd_dma = bd_dma; - if (fep->bufdesc_ex) { - bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; - cbd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); - } else { - bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; - cbd_base += txq->tx_ring_size; - } + struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; + unsigned size = dsize * txq->bd.ring_size; + + txq->bd.qid = i; + txq->bd.base = cbd_base; + txq->bd.cur = cbd_base; + txq->bd.dma = bd_dma; + txq->bd.dsize = dsize; + txq->bd.dsize_log2 = dsize_log2; + bd_dma += size; + cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); + txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); } -- cgit v1.2.3 From 53bb20d1fabacc6601e68a6a69d9d1e3aff6bd69 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:47 -0700 Subject: net: fec: add variable reg_desc_active to speed things up There is no need for complex macros every time we need to activate a queue. Also, no need to call skb_get_queue_mapping when we already know which queue it is using. Signed-off-by: Troy Kisky Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec.h | 7 +---- drivers/net/ethernet/freescale/fec_main.c | 44 +++++++++++++++++-------------- 2 files changed, 25 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 53ec04fbdc6d..bedd28a6e8f5 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -310,12 +310,6 @@ struct bufdesc_ex { #define FEC_R_BUFF_SIZE(X) (((X) == 1) ? FEC_R_BUFF_SIZE_1 : \ (((X) == 2) ? \ FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0)) -#define FEC_R_DES_ACTIVE(X) (((X) == 1) ? FEC_R_DES_ACTIVE_1 : \ - (((X) == 2) ? \ - FEC_R_DES_ACTIVE_2 : FEC_R_DES_ACTIVE_0)) -#define FEC_X_DES_ACTIVE(X) (((X) == 1) ? FEC_X_DES_ACTIVE_1 : \ - (((X) == 2) ? \ - FEC_X_DES_ACTIVE_2 : FEC_X_DES_ACTIVE_0)) #define FEC_DMA_CFG(X) (((X) == 2) ? 
FEC_DMA_CFG_2 : FEC_DMA_CFG_1) @@ -454,6 +448,7 @@ struct bufdesc_prop { struct bufdesc *base; struct bufdesc *last; struct bufdesc *cur; + void __iomem *reg_desc_active; dma_addr_t dma; unsigned short ring_size; unsigned char dsize; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index b039288f5c98..712e3bb1e0d3 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -328,7 +328,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, struct bufdesc *bdp = txq->bd.cur; struct bufdesc_ex *ebdp; int nr_frags = skb_shinfo(skb)->nr_frags; - unsigned short queue = skb_get_queue_mapping(skb); int frag, frag_len; unsigned short status; unsigned int estatus = 0; @@ -361,7 +360,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -415,7 +414,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, dma_addr_t addr; unsigned short status; unsigned short buflen; - unsigned short queue; unsigned int estatus = 0; unsigned int index; int entries_free; @@ -444,7 +442,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, bufaddr = skb->data; buflen = skb_headlen(skb); - queue = skb_get_queue_mapping(skb); index = fec_enet_get_bd_index(bdp, &txq->bd); if (((unsigned long) bufaddr) & fep->tx_align || fep->quirks & FEC_QUIRK_SWAP_FRAME) { @@ -487,7 +484,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; @@ -521,7 +518,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, txq->bd.cur = bdp; /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + writel(0, txq->bd.reg_desc_active); return 0; } @@ -534,7 +531,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, { struct fec_enet_private *fep = netdev_priv(ndev); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); - unsigned short queue = skb_get_queue_mapping(skb); unsigned short status; unsigned int estatus = 0; dma_addr_t addr; @@ -566,7 +562,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -595,7 +591,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, struct fec_enet_private *fep = netdev_priv(ndev); int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); - unsigned short queue = skb_get_queue_mapping(skb); void *bufaddr; unsigned long dmabuf; unsigned short status; @@ -630,7 +625,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) - estatus |= FEC_TX_BD_FTYPE(queue); + estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= 
BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; @@ -650,7 +645,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); int total_len, data_left; struct bufdesc *bdp = txq->bd.cur; - unsigned short queue = skb_get_queue_mapping(skb); struct tso_t tso; unsigned int index = 0; int ret; @@ -715,11 +709,11 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, /* Trigger transmission start */ if (!(fep->quirks & FEC_QUIRK_ERR007885) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || - !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active) || + !readl(txq->bd.reg_desc_active)) + writel(0, txq->bd.reg_desc_active); return 0; @@ -819,7 +813,7 @@ static void fec_enet_active_rxring(struct net_device *ndev) int i; for (i = 0; i < fep->num_rx_queues; i++) - writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); + writel(0, fep->rx_queue[i]->bd.reg_desc_active); } static void fec_enet_enable_ring(struct net_device *ndev) @@ -1255,8 +1249,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) /* ERR006538: Keep the transmitter going */ if (bdp != txq->bd.cur && - readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) - writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); + readl(txq->bd.reg_desc_active) == 0) + writel(0, txq->bd.reg_desc_active); } static void @@ -1498,7 +1492,7 @@ rx_processing_done: * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. */ - writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); + writel(0, rxq->bd.reg_desc_active); } rxq->bd.cur = bdp; return pkt_received; @@ -3061,6 +3055,14 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_set_features = fec_set_features, }; +static const unsigned short offset_des_active_rxq[] = { + FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 +}; + +static const unsigned short offset_des_active_txq[] = { + FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 +}; + /* * XXX: We need to clean up on failure exits here. * @@ -3114,6 +3116,7 @@ static int fec_enet_init(struct net_device *ndev) rxq->bd.dma = bd_dma; rxq->bd.dsize = dsize; rxq->bd.dsize_log2 = dsize_log2; + rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; bd_dma += size; cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); @@ -3129,6 +3132,7 @@ static int fec_enet_init(struct net_device *ndev) txq->bd.dma = bd_dma; txq->bd.dsize = dsize; txq->bd.dsize_log2 = dsize_log2; + txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; bd_dma += size; cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); -- cgit v1.2.3 From 80dc6a9f8e0dfb3f2c516273b2faa29c866974b2 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:48 -0700 Subject: net: fec: don't disable FEC_ENET_TS_TIMER interrupt Only the interrupt routine processes this condition. Signed-off-by: Troy Kisky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fec.h | 1 + drivers/net/ethernet/freescale/fec_main.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index bedd28a6e8f5..195122e11f10 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -375,6 +375,7 @@ struct bufdesc_ex { #define FEC_ENET_TS_TIMER ((uint)0x00008000) #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) +#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) /* ENET interrupt coalescing macro define */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 712e3bb1e0d3..ca2708d13dfe 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1553,7 +1553,7 @@ fec_enet_interrupt(int irq, void *dev_id) if (napi_schedule_prep(&fep->napi)) { /* Disable the NAPI interrupts */ - writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK); __napi_schedule(&fep->napi); } } -- cgit v1.2.3 From be293467b87c30cfdb24f74dcb225b693e436121 Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:49 -0700 Subject: net: fec: don't transfer ownership until descriptor write is complete If you don't own it, you shouldn't write to it. Signed-off-by: Troy Kisky Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ca2708d13dfe..97ca72a6aaf4 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -390,6 +390,10 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_bufaddr = cpu_to_fec32(addr); bdp->cbd_datlen = cpu_to_fec16(frag_len); + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. + */ + wmb(); bdp->cbd_sc = cpu_to_fec16(status); } @@ -499,6 +503,10 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, bdp->cbd_datlen = cpu_to_fec16(buflen); bdp->cbd_bufaddr = cpu_to_fec32(addr); + /* Make sure the updates to rest of the descriptor are performed before + * transferring ownership. + */ + wmb(); /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. @@ -1475,7 +1483,6 @@ rx_processing_done: /* Mark the buffer empty */ status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = cpu_to_fec16(status); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; @@ -1484,6 +1491,11 @@ rx_processing_done: ebdp->cbd_prot = 0; ebdp->cbd_bdu = 0; } + /* Make sure the updates to rest of the descriptor are + * performed before transferring ownership. + */ + wmb(); + bdp->cbd_sc = cpu_to_fec16(status); /* Update BD pointer to next entry */ bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); -- cgit v1.2.3 From fc75ba5159a63fad40442b9a9527a0f9b06de79b Mon Sep 17 00:00:00 2001 From: Troy Kisky Date: Fri, 5 Feb 2016 14:52:50 -0700 Subject: net: fec: improve error handling Unmap initial buffer on error. Don't free skb until it has been unmapped. Move cbd_bufaddr assignment closer to the mapping function. 
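Both FEC changes above follow the same descriptor-ownership rule: every field the hardware may read has to be written, and made visible with a barrier, before the status word that hands the descriptor over is set. A minimal sketch of that pattern, using a hypothetical descriptor layout rather than the FEC's real struct bufdesc:

/*
 * Hedged sketch of the ownership-transfer pattern, not driver code.
 * struct demo_desc and DEMO_DESC_READY are invented for illustration.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

struct demo_desc {
	__le32 buf_addr;	/* DMA address of the data buffer */
	__le16 len;		/* number of valid bytes */
	__le16 status;		/* DEMO_DESC_READY transfers ownership to HW */
};

#define DEMO_DESC_READY 0x8000

static void demo_post_tx_desc(struct demo_desc *d, dma_addr_t addr, u16 len)
{
	/* Fill in everything the hardware will look at ... */
	d->buf_addr = cpu_to_le32(addr);
	d->len = cpu_to_le16(len);

	/* ... order those stores before the ownership flip ... */
	wmb();

	/* ... and only then mark the descriptor ready for the hardware. */
	d->status = cpu_to_le16(DEMO_DESC_READY);
}

The mirror image appears on the completion side in the earlier FEC cleanup: the status word is read first, an rmb() orders that read against the rest of the descriptor, and only then are the remaining fields trusted.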
Signed-off-by: Troy Kisky Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/fec_main.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 97ca72a6aaf4..ef18ca501f9e 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -382,7 +382,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, DMA_TO_DEVICE); if (dma_mapping_error(&fep->pdev->dev, addr)) { - dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); goto dma_mapping_error; @@ -467,8 +466,12 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, if (nr_frags) { last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); - if (IS_ERR(last_bdp)) + if (IS_ERR(last_bdp)) { + dma_unmap_single(&fep->pdev->dev, addr, + buflen, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); return NETDEV_TX_OK; + } } else { status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); if (fep->bufdesc_ex) { @@ -478,6 +481,8 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, estatus |= BD_ENET_TX_TS; } } + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(buflen); if (fep->bufdesc_ex) { @@ -501,8 +506,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, /* Save skb pointer */ txq->tx_skbuff[index] = skb; - bdp->cbd_datlen = cpu_to_fec16(buflen); - bdp->cbd_bufaddr = cpu_to_fec32(addr); /* Make sure the updates to rest of the descriptor are performed before * transferring ownership. */ -- cgit v1.2.3 From 9d3053ef57599025b93aab76324ae42c60d0bac2 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Feb 2016 11:43:28 +0530 Subject: iw_cxgb4: make queue allocation code more readable Rename local mm* variables to more meaningful names Signed-off-by: Steve Wise Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/cxgb4/qp.c | 81 +++++++++++++++++++++------------------- 1 file changed, 42 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index e99345eb875a..dadf5f1855b3 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1621,7 +1621,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, unsigned int sqsize, rqsize; struct c4iw_ucontext *ucontext; int ret; - struct c4iw_mm_entry *mm1, *mm2, *mm3, *mm4, *mm5 = NULL; + struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm; + struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL; PDBG("%s ib_pd %p\n", __func__, pd); @@ -1706,29 +1707,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, goto err2; if (udata) { - mm1 = kmalloc(sizeof *mm1, GFP_KERNEL); - if (!mm1) { + sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL); + if (!sq_key_mm) { ret = -ENOMEM; goto err3; } - mm2 = kmalloc(sizeof *mm2, GFP_KERNEL); - if (!mm2) { + rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL); + if (!rq_key_mm) { ret = -ENOMEM; goto err4; } - mm3 = kmalloc(sizeof *mm3, GFP_KERNEL); - if (!mm3) { + sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL); + if (!sq_db_key_mm) { ret = -ENOMEM; goto err5; } - mm4 = kmalloc(sizeof *mm4, GFP_KERNEL); - if (!mm4) { + rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL); + if (!rq_db_key_mm) { ret = -ENOMEM; goto err6; } if (t4_sq_onchip(&qhp->wq.sq)) { - mm5 = kmalloc(sizeof *mm5, GFP_KERNEL); - if (!mm5) { + ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm), + GFP_KERNEL); + if (!ma_sync_key_mm) { ret = -ENOMEM; goto err7; } @@ -1743,7 +1745,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, uresp.rq_size = qhp->wq.rq.size; uresp.rq_memsize = qhp->wq.rq.memsize; spin_lock(&ucontext->mmap_lock); - if (mm5) { + if (ma_sync_key_mm) { uresp.ma_sync_key = ucontext->key; ucontext->key += PAGE_SIZE; } else { @@ -1761,28 +1763,29 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, ret = ib_copy_to_udata(udata, &uresp, sizeof uresp); if (ret) goto err8; - mm1->key = uresp.sq_key; - mm1->addr = qhp->wq.sq.phys_addr; - mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); - insert_mmap(ucontext, mm1); - mm2->key = uresp.rq_key; - mm2->addr = virt_to_phys(qhp->wq.rq.queue); - mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); - insert_mmap(ucontext, mm2); - mm3->key = uresp.sq_db_gts_key; - mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa; - mm3->len = PAGE_SIZE; - insert_mmap(ucontext, mm3); - mm4->key = uresp.rq_db_gts_key; - mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa; - mm4->len = PAGE_SIZE; - insert_mmap(ucontext, mm4); - if (mm5) { - mm5->key = uresp.ma_sync_key; - mm5->addr = (pci_resource_start(rhp->rdev.lldi.pdev, 0) - + PCIE_MA_SYNC_A) & PAGE_MASK; - mm5->len = PAGE_SIZE; - insert_mmap(ucontext, mm5); + sq_key_mm->key = uresp.sq_key; + sq_key_mm->addr = qhp->wq.sq.phys_addr; + sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize); + insert_mmap(ucontext, sq_key_mm); + rq_key_mm->key = uresp.rq_key; + rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue); + rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize); + insert_mmap(ucontext, rq_key_mm); + sq_db_key_mm->key = uresp.sq_db_gts_key; + sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa; + sq_db_key_mm->len = PAGE_SIZE; + insert_mmap(ucontext, sq_db_key_mm); + rq_db_key_mm->key = 
uresp.rq_db_gts_key; + rq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.rq.bar2_pa; + rq_db_key_mm->len = PAGE_SIZE; + insert_mmap(ucontext, rq_db_key_mm); + if (ma_sync_key_mm) { + ma_sync_key_mm->key = uresp.ma_sync_key; + ma_sync_key_mm->addr = + (pci_resource_start(rhp->rdev.lldi.pdev, 0) + + PCIE_MA_SYNC_A) & PAGE_MASK; + ma_sync_key_mm->len = PAGE_SIZE; + insert_mmap(ucontext, ma_sync_key_mm); } } qhp->ibqp.qp_num = qhp->wq.sq.qid; @@ -1795,15 +1798,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs, qhp->wq.rq.memsize, attrs->cap.max_recv_wr); return &qhp->ibqp; err8: - kfree(mm5); + kfree(ma_sync_key_mm); err7: - kfree(mm4); + kfree(rq_db_key_mm); err6: - kfree(mm3); + kfree(sq_db_key_mm); err5: - kfree(mm2); + kfree(rq_key_mm); err4: - kfree(mm1); + kfree(sq_key_mm); err3: remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); err2: -- cgit v1.2.3 From 6102c66eeb7497e4462c896074315635807758d4 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Feb 2016 11:43:29 +0530 Subject: iw_cxgb4: remove false error log entry Don't log errors if a listening endpoint is going away when procesing a PASS_ACCEPT_REQ message. This can happen. Change the error printk to a PDBG() debug log entry Signed-off-by: Steve Wise Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/infiniband/hw/cxgb4/cm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index cd2ff5f9518a..0c2111b5b2e4 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2399,8 +2399,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) } if (state_read(&parent_ep->com) != LISTEN) { - printk(KERN_ERR "%s - listening ep not in LISTEN\n", - __func__); + PDBG("%s - listening ep not in LISTEN\n", __func__); goto reject; } -- cgit v1.2.3 From ba9cee6aa67dd67ad1bf121ee20efc00241f086f Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Fri, 5 Feb 2016 11:43:30 +0530 Subject: cxgb4/iw_cxgb4: TOS support This series provides support for iWARP applications to specify a TOS value and have that map to a VLAN Priority for iw_cxgb4 iWARP connections. In iw_cxgb4, when allocating an L2T entry, pass the skb_priority based on the tos value in the cm_id. Also pass the correct tos value during connection setup so the passive side gets the client's desired tos. When sending the FLOWC work request to FW, if the egress device is in a vlan, then use the vlan priority bits as the scheduling class. This allows associating RDMA connections with scheduling classes to provide traffic shaping per flow. Signed-off-by: Steve Wise Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/cxgb4/cm.c | 51 ++++++++++++++++++--------- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 2 ++ drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 1 + 3 files changed, 37 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 0c2111b5b2e4..8cd7b5e4b03d 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -485,12 +485,19 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) unsigned int flowclen = 80; struct fw_flowc_wr *flowc; int i; + u16 vlan = ep->l2t->vlan; + int nparams; + + if (vlan == CPL_L2T_VLAN_NONE) + nparams = 8; + else + nparams = 9; skb = get_skb(skb, flowclen, GFP_KERNEL); flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | - FW_FLOWC_WR_NPARAMS_V(8)); + FW_FLOWC_WR_NPARAMS_V(nparams)); flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen, 16)) | FW_WR_FLOWID_V(ep->hwtid)); @@ -511,9 +518,17 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb) flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; flowc->mnemval[7].val = cpu_to_be32(ep->emss); - /* Pad WR to 16 byte boundary */ - flowc->mnemval[8].mnemonic = 0; - flowc->mnemval[8].val = 0; + if (nparams == 9) { + u16 pri; + + pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; + flowc->mnemval[8].val = cpu_to_be32(pri); + } else { + /* Pad WR to 16 byte boundary */ + flowc->mnemval[8].mnemonic = 0; + flowc->mnemval[8].val = 0; + } for (i = 0; i < 9; i++) { flowc->mnemval[i].r4[0] = 0; flowc->mnemval[i].r4[1] = 0; @@ -710,7 +725,7 @@ static int send_connect(struct c4iw_ep *ep) L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP_V(ep->tos) | + DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win); opt2 = RX_CHANNEL_V(0) | @@ -1864,7 +1879,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) L2T_IDX_V(ep->l2t->idx) | TX_CHAN_V(ep->tx_chan) | SMAC_SEL_V(ep->smac_idx) | - DSCP_V(ep->tos) | + DSCP_V(ep->tos >> 2) | ULP_MODE_V(ULP_MODE_TCPDDP) | RCV_BUFSIZ_V(win)); req->tcb.opt2 = (__force __be32) (PACE_V(1) | @@ -1928,7 +1943,7 @@ static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, struct dst_entry *dst, struct c4iw_dev *cdev, - bool clear_mpa_v1, enum chip_type adapter_type) + bool clear_mpa_v1, enum chip_type adapter_type, u8 tos) { struct neighbour *n; int err, step; @@ -1958,7 +1973,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, goto out; } ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, - n, pdev, 0); + n, pdev, rt_tos2priority(tos)); if (!ep->l2t) goto out; ep->mtu = pdev->mtu; @@ -2041,7 +2056,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) if (ep->com.cm_id->local_addr.ss_family == AF_INET) { ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, raddr->sin_addr.s_addr, laddr->sin_port, - raddr->sin_port, 0); + raddr->sin_port, ep->com.cm_id->tos); iptype = 4; ra = (__u8 *)&raddr->sin_addr; } else { @@ -2058,7 +2073,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep) goto fail3; } err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, - ep->com.dev->rdev.lldi.adapter_type); + ep->com.dev->rdev.lldi.adapter_type, + ep->com.cm_id->tos); if (err) { pr_err("%s - cannot alloc l2e.\n", 
__func__); goto fail4; @@ -2069,7 +2085,7 @@ static int c4iw_reconnect(struct c4iw_ep *ep) ep->l2t->idx); state_set(&ep->com, CONNECTING); - ep->tos = 0; + ep->tos = ep->com.cm_id->tos; /* send connect request to rnic */ err = send_connect(ep); @@ -2391,6 +2407,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) u16 peer_mss = ntohs(req->tcpopt.mss); int iptype; unsigned short hdrs; + u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); parent_ep = lookup_stid(t, stid); if (!parent_ep) { @@ -2414,7 +2431,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) ntohs(peer_port), peer_mss); dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, local_port, peer_port, - PASS_OPEN_TOS_G(ntohl(req->tos_stid))); + tos); } else { PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" , __func__, parent_ep, hwtid, @@ -2440,7 +2457,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) } err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, - parent_ep->com.dev->rdev.lldi.adapter_type); + parent_ep->com.dev->rdev.lldi.adapter_type, tos); if (err) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -2508,7 +2525,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) c4iw_get_ep(&parent_ep->com); child_ep->parent_ep = parent_ep; - child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); + child_ep->tos = tos; child_ep->dst = dst; child_ep->hwtid = hwtid; @@ -3202,7 +3219,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ra, ntohs(raddr->sin_port)); ep->dst = find_route(dev, laddr->sin_addr.s_addr, raddr->sin_addr.s_addr, laddr->sin_port, - raddr->sin_port, 0); + raddr->sin_port, cm_id->tos); } else { iptype = 6; ra = (__u8 *)&raddr6->sin6_addr; @@ -3233,7 +3250,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) } err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, - ep->com.dev->rdev.lldi.adapter_type); + ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); if (err) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); goto fail3; @@ -3244,7 +3261,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) ep->l2t->idx); state_set(&ep->com, CONNECTING); - ep->tos = 0; + ep->tos = cm_id->tos; /* send connect request to rnic */ err = send_connect(ep); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h index a072d341e205..1d2d1da40c80 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h @@ -1021,6 +1021,8 @@ struct cpl_l2t_write_req { #define L2T_W_NOREPLY_V(x) ((x) << L2T_W_NOREPLY_S) #define L2T_W_NOREPLY_F L2T_W_NOREPLY_V(1U) +#define CPL_L2T_VLAN_NONE 0xfff + struct cpl_l2t_write_rpl { union opcode_tid ot; u8 status; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a32de30ea663..c8661c77b4e3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -561,6 +561,7 @@ enum fw_flowc_mnem { FW_FLOWC_MNEM_SNDBUF, FW_FLOWC_MNEM_MSS, FW_FLOWC_MNEM_TXDATAPLEN_MAX, + FW_FLOWC_MNEM_SCHEDCLASS = 11, }; struct fw_flowc_mnemval { -- cgit v1.2.3 From a7ad40d00af18bff49437605f2ec148b80b16768 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Fri, 5 Feb 2016 11:16:50 +0000 Subject: sfc: implement IPv6 NFC (and IPV4_USER_FLOW) Signed-off-by: Edward Cree 
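The new IPv6 NFC support is exercised through the standard ethtool flow-classification ioctl. A hedged userspace sketch of inserting a TCP/IPv6 rule of the kind the new TCP_V6_FLOW case parses; the interface name, port, queue index and rule-location policy are assumptions for this example, not taken from the sfc driver:

/*
 * Illustrative only: field names follow the ethtool uapi (new enough to
 * have ethtool_tcpip6_spec), but device, port, queue and RX_CLS_LOC_ANY
 * handling are assumptions of this sketch.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int demo_add_tcp6_rule(int sock, const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V6_FLOW;
	nfc.fs.h_u.tcp_ip6_spec.pdst = htons(443);	/* match dst port 443 */
	nfc.fs.m_u.tcp_ip6_spec.pdst = 0xffff;		/* full port mask */
	nfc.fs.ring_cookie = 2;				/* steer to RX queue 2 */
	nfc.fs.location = RX_CLS_LOC_ANY;		/* assumed: driver picks the slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	/* sock is an ordinary AF_INET/SOCK_DGRAM socket. */
	return ioctl(sock, SIOCETHTOOL, &ifr);
}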
Reviewed-by: Ben Hutchings Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/ethtool.c | 184 +++++++++++++++++++++++++++++++++++++ 1 file changed, 184 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 034797661f96..445ccdb6bc67 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -783,14 +783,26 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0}; #define IP4_ADDR_FULL_MASK ((__force __be32)~0) +#define IP_PROTO_FULL_MASK 0xFF #define PORT_FULL_MASK ((__force __be16)~0) #define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static inline void ip6_fill_mask(__be32 *mask) +{ + mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0; +} + static int efx_ethtool_get_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; @@ -833,6 +845,35 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, ip_entry->psrc = spec.rem_port; ip_mask->psrc = PORT_FULL_MASK; } + } else if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) && + spec.ether_type == htons(ETH_P_IPV6) && + (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) && + (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO | + EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) { + rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ? 
+ TCP_V6_FLOW : UDP_V6_FLOW); + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(ip6_entry->ip6dst, spec.loc_host, + sizeof(ip6_entry->ip6dst)); + ip6_fill_mask(ip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(ip6_entry->ip6src, spec.rem_host, + sizeof(ip6_entry->ip6src)); + ip6_fill_mask(ip6_mask->ip6src); + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) { + ip6_entry->pdst = spec.loc_port; + ip6_mask->pdst = PORT_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) { + ip6_entry->psrc = spec.rem_port; + ip6_mask->psrc = PORT_FULL_MASK; + } } else if (!(spec.match_flags & ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG | EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE | @@ -855,6 +896,47 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, mac_entry->h_proto = spec.ether_type; mac_mask->h_proto = ETHER_TYPE_FULL_MASK; } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IP) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV4_USER_FLOW; + uip_entry->ip_ver = ETH_RX_NFC_IP4; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip_mask->proto = IP_PROTO_FULL_MASK; + uip_entry->proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + uip_entry->ip4dst = spec.loc_host[0]; + uip_mask->ip4dst = IP4_ADDR_FULL_MASK; + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + uip_entry->ip4src = spec.rem_host[0]; + uip_mask->ip4src = IP4_ADDR_FULL_MASK; + } + } else if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE && + spec.ether_type == htons(ETH_P_IPV6) && + !(spec.match_flags & + ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID | + EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST | + EFX_FILTER_MATCH_IP_PROTO))) { + rule->flow_type = IPV6_USER_FLOW; + if (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) { + uip6_mask->l4_proto = IP_PROTO_FULL_MASK; + uip6_entry->l4_proto = spec.ip_proto; + } + if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) { + memcpy(uip6_entry->ip6dst, spec.loc_host, + sizeof(uip6_entry->ip6dst)); + ip6_fill_mask(uip6_mask->ip6dst); + } + if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) { + memcpy(uip6_entry->ip6src, spec.rem_host, + sizeof(uip6_entry->ip6src)); + ip6_fill_mask(uip6_mask->ip6src); + } } else { /* The above should handle all filters that we insert */ WARN_ON(1); @@ -946,11 +1028,27 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev, } } +static inline bool ip6_mask_is_full(__be32 mask[4]) +{ + return !~(mask[0] & mask[1] & mask[2] & mask[3]); +} + +static inline bool ip6_mask_is_empty(__be32 mask[4]) +{ + return !(mask[0] | mask[1] | mask[2] | mask[3]); +} + static int efx_ethtool_set_class_rule(struct efx_nic *efx, struct ethtool_rx_flow_spec *rule) { struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec; struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec; + struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec; + struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec; + struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec; + struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec; + struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec; + struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec; struct ethhdr *mac_entry = &rule->h_u.ether_spec; struct 
ethhdr *mac_mask = &rule->m_u.ether_spec; struct efx_filter_spec spec; @@ -1012,6 +1110,92 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, return -EINVAL; break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE | + EFX_FILTER_MATCH_IP_PROTO); + spec.ether_type = htons(ETH_P_IPV6); + spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ? + IPPROTO_TCP : IPPROTO_UDP); + if (!ip6_mask_is_empty(ip6_mask->ip6dst)) { + if (!ip6_mask_is_full(ip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(ip6_mask->ip6src)) { + if (!ip6_mask_is_full(ip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (ip6_mask->pdst) { + if (ip6_mask->pdst != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT; + spec.loc_port = ip6_entry->pdst; + } + if (ip6_mask->psrc) { + if (ip6_mask->psrc != PORT_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_PORT; + spec.rem_port = ip6_entry->psrc; + } + if (ip6_mask->tclass) + return -EINVAL; + break; + + case IPV4_USER_FLOW: + if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver || + uip_entry->ip_ver != ETH_RX_NFC_IP4) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IP); + if (uip_mask->ip4dst) { + if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + spec.loc_host[0] = uip_entry->ip4dst; + } + if (uip_mask->ip4src) { + if (uip_mask->ip4src != IP4_ADDR_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + spec.rem_host[0] = uip_entry->ip4src; + } + if (uip_mask->proto) { + if (uip_mask->proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip_entry->proto; + } + break; + + case IPV6_USER_FLOW: + if (uip6_mask->l4_4_bytes || uip6_mask->tclass) + return -EINVAL; + spec.match_flags = EFX_FILTER_MATCH_ETHER_TYPE; + spec.ether_type = htons(ETH_P_IPV6); + if (!ip6_mask_is_empty(uip6_mask->ip6dst)) { + if (!ip6_mask_is_full(uip6_mask->ip6dst)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST; + memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host)); + } + if (!ip6_mask_is_empty(uip6_mask->ip6src)) { + if (!ip6_mask_is_full(uip6_mask->ip6src)) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_REM_HOST; + memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host)); + } + if (uip6_mask->l4_proto) { + if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK) + return -EINVAL; + spec.match_flags |= EFX_FILTER_MATCH_IP_PROTO; + spec.ip_proto = uip6_entry->l4_proto; + } + break; + case ETHER_FLOW: if (!is_zero_ether_addr(mac_mask->h_dest)) { if (ether_addr_equal(mac_mask->h_dest, -- cgit v1.2.3 From a060679c6b3da17dc9e95d0500f811de118ec901 Mon Sep 17 00:00:00 2001 From: "sixiao@microsoft.com" Date: Thu, 4 Feb 2016 15:49:34 -0800 Subject: hv_netvsc: cleanup netdev feature flags for netvsc 1. Adding NETIF_F_TSO6 feature flag; 2. Adding NETIF_F_HW_CSUM. NETIF_F_IPV6_CSUM and NETIF_F_IP_CSUM are being deprecated; 3. Cleanup the coding style of flag assignment by using macro. Signed-off-by: Simon Xiao Reviewed-by: K. Y. Srinivasan Reviewed-by: Haiyang Zhang Signed-off-by: David S. 
Miller --- drivers/net/hyperv/netvsc_drv.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1d3a66563bac..c72e5b83afdb 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -43,6 +43,11 @@ #define RING_SIZE_MIN 64 #define LINKCHANGE_INT (2 * HZ) +#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_HW_CSUM) static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); @@ -1081,10 +1086,8 @@ static int netvsc_probe(struct hv_device *dev, net->netdev_ops = &device_ops; - net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_TSO; - net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM | - NETIF_F_IP_CSUM | NETIF_F_TSO; + net->hw_features = NETVSC_HW_FEATURES; + net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX; net->ethtool_ops = ðtool_ops; SET_NETDEV_DEV(net, &dev->device); -- cgit v1.2.3 From 6d9b6f424d0806e2a7f3e7f0a74170ec3ad30443 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Sun, 7 Feb 2016 10:56:25 +0530 Subject: bonding: Return correct error code The return value of kzalloc on failure of allocation of memory should be -ENOMEM and not -1. Found using Coccinelle. A simplified version of the semantic patch used is: // @@ expression *e; @@ e = kzalloc(...); if (e == NULL) { ... return - -1 + -ENOMEM ; } // The single call site only checks that the return value is not 0, hence no change is required at the call site. Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/net/bonding/bond_alb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index bb9e9fc45e1b..c5ac160a8ae9 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -159,7 +159,7 @@ static int tlb_initialize(struct bonding *bond) new_hashtbl = kzalloc(size, GFP_KERNEL); if (!new_hashtbl) - return -1; + return -ENOMEM; spin_lock_bh(&bond->mode_lock); -- cgit v1.2.3 From 40d29af05703555b03c689e5cecc34bdea28fa89 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 9 Feb 2016 22:07:29 -0800 Subject: vxlan: udp_tunnel duplicate include net/udp_tunnel.h Signed-off-by: Stephen Hemminger Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 65f52472a52c..ebf57d90d295 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -42,7 +42,7 @@ #include #include #include -#include + #if IS_ENABLED(CONFIG_IPV6) #include #include -- cgit v1.2.3 From 809dc75e9b82fcae9c3c48b4eec4708c005259f3 Mon Sep 17 00:00:00 2001 From: stephen hemminger Date: Tue, 9 Feb 2016 22:11:27 -0800 Subject: vrf: duplicate include of rtnetlink.h Signed-off-by: Stephen Hemminger Signed-off-by: David S. 
Miller --- drivers/net/vrf.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 76e1fc9d8748..9ce088bb28ab 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From e6515203202a913816abb3fb808e71e3a2a0967e Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 10 Feb 2016 10:08:54 +0530 Subject: net: hamradio: baycom_ser_fdx: Replace timeval with timespec64 32 bit systems using 'struct timeval' will break in the year 2038, so we replace the code appropriately. However, this driver is not broken in 2038 since we are only using microseconds portion of the time. This patch replaces 'struct timeval' with 'struct timespec64'. We only need to find elapsed microseconds rather than absolute time, so it's better to use monotonic time, so using ktime_get_ts64() makes the code more efficient and more robust against concurrent settimeofday() calls. Signed-off-by: Amitoj Kaur Chawla Reviewed-by: Arnd Bergmann Reviewed-by: Thomas Sailer Signed-off-by: David S. Miller --- drivers/net/hamradio/baycom_ser_fdx.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 636b65c66d49..7b916d5b14b9 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c @@ -80,6 +80,7 @@ #include #include #include +#include #include #include @@ -228,14 +229,15 @@ static inline unsigned int hweight8(unsigned int w) /* --------------------------------------------------------------------- */ -static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timeval *tv, unsigned char curs) +static __inline__ void ser12_rx(struct net_device *dev, struct baycom_state *bc, struct timespec64 *ts, unsigned char curs) { int timediff; int bdus8 = bc->baud_us >> 3; int bdus4 = bc->baud_us >> 2; int bdus2 = bc->baud_us >> 1; - timediff = 1000000 + tv->tv_usec - bc->modem.ser12.pll_time; + timediff = 1000000 + ts->tv_nsec / NSEC_PER_USEC - + bc->modem.ser12.pll_time; while (timediff >= 500000) timediff -= 1000000; while (timediff >= bdus2) { @@ -287,7 +289,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct baycom_state *bc = netdev_priv(dev); - struct timeval tv; + struct timespec64 ts; unsigned char iir, msr; unsigned int txcount = 0; @@ -297,7 +299,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id) if ((iir = inb(IIR(dev->base_addr))) & 1) return IRQ_NONE; /* get current time */ - do_gettimeofday(&tv); + ktime_get_ts64(&ts); msr = inb(MSR(dev->base_addr)); /* delta DCD */ if ((msr & 8) && bc->opt_dcd) @@ -340,7 +342,7 @@ static irqreturn_t ser12_interrupt(int irq, void *dev_id) } iir = inb(IIR(dev->base_addr)); } while (!(iir & 1)); - ser12_rx(dev, bc, &tv, msr & 0x10); /* CTS */ + ser12_rx(dev, bc, &ts, msr & 0x10); /* CTS */ if (bc->modem.ptt && txcount) { if (bc->modem.ser12.txshreg <= 1) { bc->modem.ser12.txshreg = 0x10000 | hdlcdrv_getbits(&bc->hdrv); -- cgit v1.2.3 From 3e2a5e1539064329f5f6888274600841ad6f87bd Mon Sep 17 00:00:00 2001 From: Sergio Prado Date: Tue, 9 Feb 2016 12:07:16 -0200 Subject: net: macb: add wake-on-lan support via magic packet Tested on Acqua A5 SoM (http://www.acmesystems.it/acqua). Signed-off-by: Sergio Prado Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/net/macb.txt | 2 + drivers/net/ethernet/cadence/macb.c | 67 +++++++++++++++++++++++--- drivers/net/ethernet/cadence/macb.h | 4 ++ 3 files changed, 67 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index d2e243b1ec0e..c6b1cb5ffa87 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -25,6 +25,8 @@ Required properties: Optional properties for PHY child node: - reset-gpios : Should specify the gpio for phy reset +- cdns,magic-packet : If present, indicates that the hardware supports waking + up via magic packet. Examples: diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 50c94104f19c..69af049e55a8 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -58,6 +58,9 @@ #define GEM_MTU_MIN_SIZE 68 +#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) +#define MACB_WOL_ENABLED (0x1 << 1) + /* * Graceful stop timeouts in us. We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) @@ -2124,6 +2127,39 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, } } +static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct macb *bp = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { + wol->supported = WAKE_MAGIC; + + if (bp->wol & MACB_WOL_ENABLED) + wol->wolopts |= WAKE_MAGIC; + } +} + +static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct macb *bp = netdev_priv(netdev); + + if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || + (wol->wolopts & ~WAKE_MAGIC)) + return -EOPNOTSUPP; + + if (wol->wolopts & WAKE_MAGIC) + bp->wol |= MACB_WOL_ENABLED; + else + bp->wol &= ~MACB_WOL_ENABLED; + + device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); + + return 0; +} + static const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, @@ -2131,6 +2167,8 @@ static const struct ethtool_ops macb_ethtool_ops = { .get_regs = macb_get_regs, .get_link = ethtool_op_get_link, .get_ts_info = ethtool_op_get_ts_info, + .get_wol = macb_get_wol, + .set_wol = macb_set_wol, }; static const struct ethtool_ops gem_ethtool_ops = { @@ -2890,6 +2928,11 @@ static int macb_probe(struct platform_device *pdev) if (macb_config) bp->jumbo_max_len = macb_config->jumbo_max_len; + bp->wol = 0; + if (of_get_property(np, "cdns,magic-packet", NULL)) + bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; + device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + spin_lock_init(&bp->lock); /* setup capabilities */ @@ -3006,9 +3049,15 @@ static int __maybe_unused macb_suspend(struct device *dev) netif_carrier_off(netdev); netif_device_detach(netdev); - clk_disable_unprepare(bp->tx_clk); - clk_disable_unprepare(bp->hclk); - clk_disable_unprepare(bp->pclk); + if (bp->wol & MACB_WOL_ENABLED) { + macb_writel(bp, IER, MACB_BIT(WOL)); + macb_writel(bp, WOL, MACB_BIT(MAG)); + enable_irq_wake(bp->queues[0].irq); + } else { + clk_disable_unprepare(bp->tx_clk); + clk_disable_unprepare(bp->hclk); + clk_disable_unprepare(bp->pclk); + } return 0; } @@ -3019,9 +3068,15 @@ static int __maybe_unused macb_resume(struct device *dev) struct net_device *netdev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(netdev); - 
clk_prepare_enable(bp->pclk); - clk_prepare_enable(bp->hclk); - clk_prepare_enable(bp->tx_clk); + if (bp->wol & MACB_WOL_ENABLED) { + macb_writel(bp, IDR, MACB_BIT(WOL)); + macb_writel(bp, WOL, 0); + disable_irq_wake(bp->queues[0].irq); + } else { + clk_prepare_enable(bp->pclk); + clk_prepare_enable(bp->hclk); + clk_prepare_enable(bp->tx_clk); + } netif_device_attach(netdev); diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 0d4ecfcd60b7..9ba416d5afff 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -312,6 +312,8 @@ #define MACB_PFR_SIZE 1 #define MACB_PTZ_OFFSET 13 /* Enable pause time zero interrupt */ #define MACB_PTZ_SIZE 1 +#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */ +#define MACB_WOL_SIZE 1 /* Bitfields in MAN */ #define MACB_DATA_OFFSET 0 /* data */ @@ -842,6 +844,8 @@ struct macb { unsigned int rx_frm_len_mask; unsigned int jumbo_max_len; + + u32 wol; }; static inline bool macb_is_gem(struct macb *bp) -- cgit v1.2.3 From 1e2a8868e0541b7d8870781b714d74ac7cad9a6a Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Tue, 9 Feb 2016 10:37:46 +0000 Subject: bonding: use return instead of goto Replace 'goto' with 'return' to remove unnecessary check at label: err_undo_flags. The reason is that 'err_undo_flags' do two things for the first slave device: 1.revert bond mac address if it is set by the slave device. 2.revert bond device type if it's not ARPHRD_ETHER. It's not necessary for the following three places, they changed neither bond mac address nor type. It's straightforward to return directly. Signed-off-by: Zhang Shengju Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 705cb0198faa..45bdd87d6b7a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1379,8 +1379,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (slave_dev->flags & IFF_UP) { netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n", slave_dev->name); - res = -EPERM; - goto err_undo_flags; + return -EPERM; } /* set bonding device ether type by slave - bonding netdevices are @@ -1400,8 +1399,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) res = notifier_to_errno(res); if (res) { netdev_err(bond_dev, "refused to change device type\n"); - res = -EBUSY; - goto err_undo_flags; + return -EBUSY; } /* Flush unicast and multicast addresses */ @@ -1421,8 +1419,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) } else if (bond_dev->type != slave_dev->type) { netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n", slave_dev->name, slave_dev->type, bond_dev->type); - res = -EINVAL; - goto err_undo_flags; + return -EINVAL; } if (slave_ops->ndo_set_mac_address == NULL) { -- cgit v1.2.3 From a05d4845907a6f0296612d24956b189a51fb8df7 Mon Sep 17 00:00:00 2001 From: Thanneeru Srinivasulu Date: Thu, 11 Feb 2016 21:50:21 +0530 Subject: net, thunderx: Add TX timeout and RX buffer alloc failure stats. When system is low on atomic memory, too many error messages are logged. Since this is not a total failure but a simple switch to non-atomic allocation better to have a stat. Also add a stat for reset, kicked due to transmit watchdog timeout. 
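Both new counters are exported through the existing nicvf ethtool stats table, so after applying the patch they can be read with, for example (interface name is illustrative): ethtool -S <iface> | grep -E 'rcv_buffer_alloc_failures|tx_timeout'.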
Signed-off-by: Thanneeru Srinivasulu Signed-off-by: Sunil Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 3 +++ drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 2 ++ drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1 + drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 3 +-- 4 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 688828865c48..8af363a9af60 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -248,10 +248,13 @@ struct nicvf_drv_stats { u64 rx_frames_jumbo; u64 rx_drops; + u64 rcv_buffer_alloc_failures; + /* Tx */ u64 tx_frames_ok; u64 tx_drops; u64 tx_tso; + u64 tx_timeout; u64 txq_stop; u64 txq_wake; }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index a12b2e38cf61..d2d8ef270142 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -89,9 +89,11 @@ static const struct nicvf_stat nicvf_drv_stats[] = { NICVF_DRV_STAT(rx_frames_1518), NICVF_DRV_STAT(rx_frames_jumbo), NICVF_DRV_STAT(rx_drops), + NICVF_DRV_STAT(rcv_buffer_alloc_failures), NICVF_DRV_STAT(tx_frames_ok), NICVF_DRV_STAT(tx_tso), NICVF_DRV_STAT(tx_drops), + NICVF_DRV_STAT(tx_timeout), NICVF_DRV_STAT(txq_stop), NICVF_DRV_STAT(txq_wake), }; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c24cb2a86a42..95db6b7e3aab 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -1394,6 +1394,7 @@ static void nicvf_tx_timeout(struct net_device *dev) netdev_warn(dev, "%s: Transmit timed out, resetting\n", dev->name); + nic->drv_stats.tx_timeout++; schedule_work(&nic->reset_task); } diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d0d1b5490061..50ab6f4d8c19 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -96,8 +96,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order); if (!nic->rb_page) { - netdev_err(nic->netdev, - "Failed to allocate new rcv buffer\n"); + nic->drv_stats.rcv_buffer_alloc_failures++; return -ENOMEM; } nic->rb_page_offset = 0; -- cgit v1.2.3 From ef0a4d8601760b346d9d0893f2a554c338861c4f Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Thu, 11 Feb 2016 21:50:22 +0530 Subject: net: thunderx: Use napi_schedule_irqoff() napi_schedule is being called from hard irq context, hence switch to napi_schedule_irqoff which avoids unneeded call to local_irq_save and local_irq_restore. Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 95db6b7e3aab..c6f146cf266d 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -828,7 +828,7 @@ static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq) nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); /* Schedule NAPI */ - napi_schedule(&cq_poll->napi); + napi_schedule_irqoff(&cq_poll->napi); /* Clear interrupt */ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx); -- cgit v1.2.3 From fb4b7d98a0215fc3310c8415a86acfe726de395c Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Thu, 11 Feb 2016 21:50:23 +0530 Subject: net: thunderx: Assign affinity hints to vf's interrupts This affinity hint can be used by user space irqbalance tool to set preferred CPU mask for irqs registered by this VF. Irqbalance needs to be in 'exact' mode to set irq affinity same as indicated by affinity hint. Signed-off-by: Sunil Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 1 + drivers/net/ethernet/cavium/thunder/nicvf_main.c | 37 ++++++++++++++++++++++-- 2 files changed, 36 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 8af363a9af60..00cc9156abbb 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -309,6 +309,7 @@ struct nicvf { struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS]; char irq_name[NIC_VF_MSIX_VECTORS][20]; bool irq_allocated[NIC_VF_MSIX_VECTORS]; + cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS]; /* VF <-> PF mailbox communication */ bool pf_acked; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index c6f146cf266d..90ce93e380e1 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -899,6 +899,31 @@ static void nicvf_disable_msix(struct nicvf *nic) } } +static void nicvf_set_irq_affinity(struct nicvf *nic) +{ + int vec, cpu; + int irqnum; + + for (vec = 0; vec < nic->num_vec; vec++) { + if (!nic->irq_allocated[vec]) + continue; + + if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL)) + return; + /* CQ interrupts */ + if (vec < NICVF_INTR_ID_SQ) + /* Leave CPU0 for RBDR and other interrupts */ + cpu = nicvf_netdev_qidx(nic, vec) + 1; + else + cpu = 0; + + cpumask_set_cpu(cpumask_local_spread(cpu, nic->node), + nic->affinity_mask[vec]); + irqnum = nic->msix_entries[vec].vector; + irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]); + } +} + static int nicvf_register_interrupts(struct nicvf *nic) { int irq, ret = 0; @@ -944,8 +969,13 @@ static int nicvf_register_interrupts(struct nicvf *nic) ret = request_irq(nic->msix_entries[irq].vector, nicvf_qs_err_intr_handler, 0, nic->irq_name[irq], nic); - if (!ret) - nic->irq_allocated[irq] = true; + if (ret) + goto err; + + nic->irq_allocated[irq] = true; + + /* Set IRQ affinities */ + nicvf_set_irq_affinity(nic); err: if (ret) @@ -963,6 +993,9 @@ static void nicvf_unregister_interrupts(struct nicvf *nic) if (!nic->irq_allocated[irq]) continue; + irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL); + free_cpumask_var(nic->affinity_mask[irq]); + if (irq < NICVF_INTR_ID_SQ) 
free_irq(nic->msix_entries[irq].vector, nic->napi[irq]); else -- cgit v1.2.3 From eee326fd83348ed39a06c0db999ed513d10d9c39 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 11 Feb 2016 21:50:24 +0530 Subject: net: thunderx: bgx: Use standard firmware node infrastructure. In the case of OF device tree, the firmware information is attached to the BGX device structure in the standard manner, so use the firmware iterators and accessors where possible. Signed-off-by: David Daney Signed-off-by: Sunil Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 27 ++++++++++++----------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 9df26c2263bc..111835ba18e3 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -968,26 +968,27 @@ static int bgx_init_acpi_phy(struct bgx *bgx) static int bgx_init_of_phy(struct bgx *bgx) { - struct device_node *np; - struct device_node *np_child; + struct fwnode_handle *fwn; u8 lmac = 0; - char bgx_sel[5]; const char *mac; - /* Get BGX node from DT */ - snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id); - np = of_find_node_by_name(NULL, bgx_sel); - if (!np) - return -ENODEV; + device_for_each_child_node(&bgx->pdev->dev, fwn) { + struct device_node *phy_np; + struct device_node *node = to_of_node(fwn); + + /* If it is not an OF node we cannot handle it yet, so + * exit the loop. + */ + if (!node) + break; - for_each_child_of_node(np, np_child) { - struct device_node *phy_np = of_parse_phandle(np_child, - "phy-handle", 0); + phy_np = of_parse_phandle(node, "phy-handle", 0); if (!phy_np) continue; + bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); - mac = of_get_mac_address(np_child); + mac = of_get_mac_address(node); if (mac) ether_addr_copy(bgx->lmac[lmac].mac, mac); @@ -995,7 +996,7 @@ static int bgx_init_of_phy(struct bgx *bgx) bgx->lmac[lmac].lmacid = lmac; lmac++; if (lmac == MAX_LMAC_PER_BGX) { - of_node_put(np_child); + of_node_put(node); break; } } -- cgit v1.2.3 From 1d82efaca87ecf53e97c696f9d0a9adefea0c7b5 Mon Sep 17 00:00:00 2001 From: Robert Richter Date: Thu, 11 Feb 2016 21:50:25 +0530 Subject: net: thunderx: bgx: Add log message when setting mac address Signed-off-by: Robert Richter Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 111835ba18e3..f8abdffdd851 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -886,7 +886,8 @@ static void bgx_get_qlm_mode(struct bgx *bgx) #ifdef CONFIG_ACPI -static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) +static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev, + u8 *dst) { u8 mac[ETH_ALEN]; int ret; @@ -897,10 +898,13 @@ static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst) goto out; if (!is_valid_ether_addr(mac)) { + dev_err(dev, "MAC address invalid: %pM\n", mac); ret = -EINVAL; goto out; } + dev_info(dev, "MAC address set to: %pM\n", mac); + memcpy(dst, mac, ETH_ALEN); out: return ret; @@ -911,14 +915,15 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, u32 lvl, void *context, void **rv) { struct bgx *bgx = context; + struct device *dev = &bgx->pdev->dev; struct acpi_device *adev; if (acpi_bus_get_device(handle, &adev)) goto out; - acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac); + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev); + SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev); bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count; out: -- cgit v1.2.3 From 6e4be8d6717cb63c58f6b404e63a881c76d8878c Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Thu, 11 Feb 2016 21:50:26 +0530 Subject: net: thunderx: Alloc higher order pages when pagesize is small Allocate higher order pages when pagesize is small, this will reduce number of calls to page allocator and wastage of memory. Signed-off-by: Sunil Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 50ab6f4d8c19..5adb208c1ad2 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -78,7 +78,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, u32 buf_len, u64 **rbuf) { - int order = get_order(buf_len); + int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0; /* Check if request can be accomodated in previous allocated page */ if (nic->rb_page) { -- cgit v1.2.3 From aa54c8da9a8e480861346d88b57086490bdd7b03 Mon Sep 17 00:00:00 2001 From: Helmut Buchsbaum Date: Tue, 9 Feb 2016 20:47:13 +0100 Subject: net: phy: spi_ks8995: introduce spi_device_id table Refactor to use spi_device_id table to facilitate easy extendability. Signed-off-by: Helmut Buchsbaum Signed-off-by: David S. 
Miller --- drivers/net/phy/spi_ks8995.c | 42 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index c72c42206850..e7a215d2b1ae 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -89,6 +89,28 @@ #define KS8995_RESET_DELAY 10 /* usec */ +enum ks8995_chip_variant { + ks8995, + ksz8864, + max_variant +}; + +struct ks8995_chip_params { + char *name; + int regs_size; +}; + +static const struct ks8995_chip_params ks8995_chip[] = { + [ks8995] = { + .name = "KS8995MA", + .regs_size = KS8995_REGS_SIZE, + }, + [ksz8864] = { + .name = "KSZ8864RMN", + .regs_size = KSZ8864_REGS_SIZE, + }, +}; + struct ks8995_pdata { /* not yet implemented */ }; @@ -98,8 +120,16 @@ struct ks8995_switch { struct mutex lock; struct ks8995_pdata *pdata; struct bin_attribute regs_attr; + const struct ks8995_chip_params *chip; }; +static const struct spi_device_id ks8995_id[] = { + {"ks8995", ks8995}, + {"ksz8864", ksz8864}, + { } +}; +MODULE_DEVICE_TABLE(spi, ks8995_id); + static inline u8 get_chip_id(u8 val) { return (val >> ID1_CHIPID_S) & ID1_CHIPID_M; @@ -244,17 +274,22 @@ static const struct bin_attribute ks8995_registers_attr = { }; /* ------------------------------------------------------------------------ */ - static int ks8995_probe(struct spi_device *spi) { struct ks8995_switch *ks; struct ks8995_pdata *pdata; u8 ids[2]; int err; + int variant = spi_get_device_id(spi)->driver_data; /* Chip description */ pdata = spi->dev.platform_data; + if (variant >= max_variant) { + dev_err(&spi->dev, "bad chip variant %d\n", variant); + return -ENODEV; + } + ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL); if (!ks) return -ENOMEM; @@ -262,6 +297,8 @@ static int ks8995_probe(struct spi_device *spi) mutex_init(&ks->lock); ks->pdata = pdata; ks->spi = spi_dev_get(spi); + ks->chip = &ks8995_chip[variant]; + spi_set_drvdata(spi, ks); spi->mode = SPI_MODE_0; @@ -287,6 +324,7 @@ static int ks8995_probe(struct spi_device *spi) return -ENODEV; } + ks->regs_attr.size = ks->chip->regs_size; memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr)); if (get_chip_id(ids[1]) != CHIPID_M) { u8 val; @@ -303,7 +341,6 @@ static int ks8995_probe(struct spi_device *spi) dev_err(&spi->dev, "unknown chip:%02x,0\n", ids[1]); return err; } - ks->regs_attr.size = KSZ8864_REGS_SIZE; } err = ks8995_reset(ks); @@ -346,6 +383,7 @@ static struct spi_driver ks8995_driver = { }, .probe = ks8995_probe, .remove = ks8995_remove, + .id_table = ks8995_id, }; module_spi_driver(ks8995_driver); -- cgit v1.2.3 From 484e36ff18341ca1fca4258139100b2e90530f53 Mon Sep 17 00:00:00 2001 From: Helmut Buchsbaum Date: Tue, 9 Feb 2016 20:47:14 +0100 Subject: net: phy: spi_ks8995: verify chip and determine revision Since the chip variant is now determined by spi_device_id, verify family and chip id and determine the revision id. Signed-off-by: Helmut Buchsbaum Signed-off-by: David S. 
Miller --- drivers/net/phy/spi_ks8995.c | 118 +++++++++++++++++++++++++++++-------------- 1 file changed, 80 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index e7a215d2b1ae..5e66d4bd7325 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -83,6 +83,8 @@ #define FAMILY_KS8995 0x95 #define CHIPID_M 0 +#define KS8995_CHIP_ID 0x00 +#define KSZ8864_CHIP_ID 0x01 #define KS8995_CMD_WRITE 0x02U #define KS8995_CMD_READ 0x03U @@ -97,16 +99,22 @@ enum ks8995_chip_variant { struct ks8995_chip_params { char *name; + int family_id; + int chip_id; int regs_size; }; static const struct ks8995_chip_params ks8995_chip[] = { [ks8995] = { .name = "KS8995MA", + .family_id = FAMILY_KS8995, + .chip_id = KS8995_CHIP_ID, .regs_size = KS8995_REGS_SIZE, }, [ksz8864] = { .name = "KSZ8864RMN", + .family_id = FAMILY_KS8995, + .chip_id = KSZ8864_CHIP_ID, .regs_size = KSZ8864_REGS_SIZE, }, }; @@ -121,6 +129,7 @@ struct ks8995_switch { struct ks8995_pdata *pdata; struct bin_attribute regs_attr; const struct ks8995_chip_params *chip; + int revision_id; }; static const struct spi_device_id ks8995_id[] = { @@ -263,6 +272,73 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj, return ks8995_write(ks8995, buf, off, count); } +/* ks8995_get_revision - get chip revision + * @ks: pointer to switch instance + * + * Verify chip family and id and get chip revision. + */ +static int ks8995_get_revision(struct ks8995_switch *ks) +{ + int err; + u8 id0, id1, ksz8864_id; + + /* read family id */ + err = ks8995_read_reg(ks, KS8995_REG_ID0, &id0); + if (err) { + err = -EIO; + goto err_out; + } + + /* verify family id */ + if (id0 != ks->chip->family_id) { + dev_err(&ks->spi->dev, "chip family id mismatch: expected 0x%02x but 0x%02x read\n", + ks->chip->family_id, id0); + err = -ENODEV; + goto err_out; + } + + switch (ks->chip->family_id) { + case FAMILY_KS8995: + /* try reading chip id at CHIP ID1 */ + err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1); + if (err) { + err = -EIO; + goto err_out; + } + + /* verify chip id */ + if ((get_chip_id(id1) == CHIPID_M) && + (get_chip_id(id1) == ks->chip->chip_id)) { + /* KS8995MA */ + ks->revision_id = get_chip_rev(id1); + } else if (get_chip_id(id1) != CHIPID_M) { + /* KSZ8864RMN */ + err = ks8995_read_reg(ks, KS8995_REG_ID1, &ksz8864_id); + if (err) { + err = -EIO; + goto err_out; + } + + if ((ksz8864_id & 0x80) && + (ks->chip->chip_id == KSZ8864_CHIP_ID)) { + ks->revision_id = get_chip_rev(id1); + } + + } else { + dev_err(&ks->spi->dev, "unsupported chip id for KS8995 family: 0x%02x\n", + id1); + err = -ENODEV; + } + break; + default: + dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0); + err = -ENODEV; + break; + } +err_out: + return err; +} + static const struct bin_attribute ks8995_registers_attr = { .attr = { .name = "registers", @@ -278,7 +354,6 @@ static int ks8995_probe(struct spi_device *spi) { struct ks8995_switch *ks; struct ks8995_pdata *pdata; - u8 ids[2]; int err; int variant = spi_get_device_id(spi)->driver_data; @@ -309,39 +384,12 @@ static int ks8995_probe(struct spi_device *spi) return err; } - err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids)); - if (err < 0) { - dev_err(&spi->dev, "unable to read id registers, err=%d\n", - err); + err = ks8995_get_revision(ks); + if (err) return err; - } - - switch (ids[0]) { - case FAMILY_KS8995: - break; - default: - dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]); - return -ENODEV; 
- } ks->regs_attr.size = ks->chip->regs_size; memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr)); - if (get_chip_id(ids[1]) != CHIPID_M) { - u8 val; - - /* Check if this is a KSZ8864RMN */ - err = ks8995_read(ks, &val, KSZ8864_REG_ID1, sizeof(val)); - if (err < 0) { - dev_err(&spi->dev, - "unable to read chip id register, err=%d\n", - err); - return err; - } - if ((val & 0x80) == 0) { - dev_err(&spi->dev, "unknown chip:%02x,0\n", ids[1]); - return err; - } - } err = ks8995_reset(ks); if (err) @@ -354,14 +402,8 @@ static int ks8995_probe(struct spi_device *spi) return err; } - if (get_chip_id(ids[1]) == CHIPID_M) { - dev_info(&spi->dev, - "KS8995 device found, Chip ID:%x, Revision:%x\n", - get_chip_id(ids[1]), get_chip_rev(ids[1])); - } else { - dev_info(&spi->dev, "KSZ8864 device found, Revision:%x\n", - get_chip_rev(ids[1])); - } + dev_info(&spi->dev, "%s device found, Chip ID:%x, Revision:%x\n", + ks->chip->name, ks->chip->chip_id, ks->revision_id); return 0; } -- cgit v1.2.3 From cd6f288cbaab656cebd524c5ef2388c11378c827 Mon Sep 17 00:00:00 2001 From: Helmut Buchsbaum Date: Tue, 9 Feb 2016 20:47:15 +0100 Subject: net: phy: spi_ks8995: add support for resetting switch using GPIO When using device tree it is no more possible to reset the PHY at board level. Furthermore, doing in the driver allows to power down the switch when it is not used any more. The patch introduces a new optional property "reset-gpios" denoting an appropriate GPIO handle, e.g.: reset-gpios = <&gpio0 46 GPIO_ACTIVE_LOW> Signed-off-by: Helmut Buchsbaum Signed-off-by: David S. Miller --- drivers/net/phy/spi_ks8995.c | 71 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 5e66d4bd7325..8258c166a767 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -18,6 +18,9 @@ #include #include #include +#include +#include +#include #include @@ -120,7 +123,8 @@ static const struct ks8995_chip_params ks8995_chip[] = { }; struct ks8995_pdata { - /* not yet implemented */ + int reset_gpio; + enum of_gpio_flags reset_gpio_flags; }; struct ks8995_switch { @@ -339,6 +343,24 @@ err_out: return err; } +/* ks8995_parse_dt - setup platform data from devicetree + * @ks: pointer to switch instance + * + * Parses supported DT properties and sets up platform data + * accordingly. 
+ */ +static void ks8995_parse_dt(struct ks8995_switch *ks) +{ + struct device_node *np = ks->spi->dev.of_node; + struct ks8995_pdata *pdata = ks->pdata; + + if (!np) + return; + + pdata->reset_gpio = of_get_named_gpio_flags(np, "reset-gpios", 0, + &pdata->reset_gpio_flags); +} + static const struct bin_attribute ks8995_registers_attr = { .attr = { .name = "registers", @@ -352,14 +374,10 @@ static const struct bin_attribute ks8995_registers_attr = { /* ------------------------------------------------------------------------ */ static int ks8995_probe(struct spi_device *spi) { - struct ks8995_switch *ks; - struct ks8995_pdata *pdata; - int err; + struct ks8995_switch *ks; + int err; int variant = spi_get_device_id(spi)->driver_data; - /* Chip description */ - pdata = spi->dev.platform_data; - if (variant >= max_variant) { dev_err(&spi->dev, "bad chip variant %d\n", variant); return -ENODEV; @@ -370,10 +388,42 @@ static int ks8995_probe(struct spi_device *spi) return -ENOMEM; mutex_init(&ks->lock); - ks->pdata = pdata; ks->spi = spi_dev_get(spi); ks->chip = &ks8995_chip[variant]; + if (ks->spi->dev.of_node) { + ks->pdata = devm_kzalloc(&spi->dev, sizeof(*ks->pdata), + GFP_KERNEL); + if (!ks->pdata) + return -ENOMEM; + + ks->pdata->reset_gpio = -1; + + ks8995_parse_dt(ks); + } + + if (!ks->pdata) + ks->pdata = spi->dev.platform_data; + + /* de-assert switch reset */ + if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) { + unsigned long flags; + + flags = (ks->pdata->reset_gpio_flags == OF_GPIO_ACTIVE_LOW ? + GPIOF_ACTIVE_LOW : 0); + + err = devm_gpio_request_one(&spi->dev, + ks->pdata->reset_gpio, + flags, "switch-reset"); + if (err) { + dev_err(&spi->dev, + "failed to get reset-gpios: %d\n", err); + return -EIO; + } + + gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 0); + } + spi_set_drvdata(spi, ks); spi->mode = SPI_MODE_0; @@ -414,11 +464,14 @@ static int ks8995_remove(struct spi_device *spi) sysfs_remove_bin_file(&spi->dev.kobj, &ks->regs_attr); + /* assert reset */ + if (ks->pdata && gpio_is_valid(ks->pdata->reset_gpio)) + gpiod_set_value(gpio_to_desc(ks->pdata->reset_gpio), 1); + return 0; } /* ------------------------------------------------------------------------ */ - static struct spi_driver ks8995_driver = { .driver = { .name = "spi-ks8995", -- cgit v1.2.3 From 6665e62387c64054493411d00c7b0a5a37af88a5 Mon Sep 17 00:00:00 2001 From: Helmut Buchsbaum Date: Tue, 9 Feb 2016 20:47:16 +0100 Subject: net: phy: spi_ks8995: generalize creation of SPI commands Prepare creating SPI reads and writes for other switch families. The KS8995 family uses the straight forward <8bit CMD><8bit ADDR> sequence. To be able to support KSZ8795 family, which uses <3bit CMD><12bit ADDR><1 bit TR> make the SPI command creation chip variant dependent. Signed-off-by: Helmut Buchsbaum Signed-off-by: David S. 
Miller --- drivers/net/phy/spi_ks8995.c | 46 +++++++++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 8258c166a767..9dcc5b4fd9d1 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -105,6 +105,8 @@ struct ks8995_chip_params { int family_id; int chip_id; int regs_size; + int addr_width; + int addr_shift; }; static const struct ks8995_chip_params ks8995_chip[] = { @@ -113,12 +115,16 @@ static const struct ks8995_chip_params ks8995_chip[] = { .family_id = FAMILY_KS8995, .chip_id = KS8995_CHIP_ID, .regs_size = KS8995_REGS_SIZE, + .addr_width = 8, + .addr_shift = 0, }, [ksz8864] = { .name = "KSZ8864RMN", .family_id = FAMILY_KS8995, .chip_id = KSZ8864_CHIP_ID, .regs_size = KSZ8864_REGS_SIZE, + .addr_width = 8, + .addr_shift = 0, }, }; @@ -153,20 +159,44 @@ static inline u8 get_chip_rev(u8 val) return (val >> ID1_REVISION_S) & ID1_REVISION_M; } +/* create_spi_cmd - create a chip specific SPI command header + * @ks: pointer to switch instance + * @cmd: SPI command for switch + * @address: register address for command + * + * Different chip families use different bit pattern to address the switches + * registers: + * + * KS8995: 8bit command + 8bit address + * KSZ8795: 3bit command + 12bit address + 1bit TR (?) + */ +static inline __be16 create_spi_cmd(struct ks8995_switch *ks, int cmd, + unsigned address) +{ + u16 result = cmd; + + /* make room for address (incl. address shift) */ + result <<= ks->chip->addr_width + ks->chip->addr_shift; + /* add address */ + result |= address << ks->chip->addr_shift; + /* SPI protocol needs big endian */ + return cpu_to_be16(result); +} /* ------------------------------------------------------------------------ */ static int ks8995_read(struct ks8995_switch *ks, char *buf, unsigned offset, size_t count) { - u8 cmd[2]; + __be16 cmd; struct spi_transfer t[2]; struct spi_message m; int err; + cmd = create_spi_cmd(ks, KS8995_CMD_READ, offset); spi_message_init(&m); memset(&t, 0, sizeof(t)); - t[0].tx_buf = cmd; + t[0].tx_buf = &cmd; t[0].len = sizeof(cmd); spi_message_add_tail(&t[0], &m); @@ -174,9 +204,6 @@ static int ks8995_read(struct ks8995_switch *ks, char *buf, t[1].len = count; spi_message_add_tail(&t[1], &m); - cmd[0] = KS8995_CMD_READ; - cmd[1] = offset; - mutex_lock(&ks->lock); err = spi_sync(ks->spi, &m); mutex_unlock(&ks->lock); @@ -184,20 +211,20 @@ static int ks8995_read(struct ks8995_switch *ks, char *buf, return err ? 
err : count; } - static int ks8995_write(struct ks8995_switch *ks, char *buf, unsigned offset, size_t count) { - u8 cmd[2]; + __be16 cmd; struct spi_transfer t[2]; struct spi_message m; int err; + cmd = create_spi_cmd(ks, KS8995_CMD_WRITE, offset); spi_message_init(&m); memset(&t, 0, sizeof(t)); - t[0].tx_buf = cmd; + t[0].tx_buf = &cmd; t[0].len = sizeof(cmd); spi_message_add_tail(&t[0], &m); @@ -205,9 +232,6 @@ static int ks8995_write(struct ks8995_switch *ks, char *buf, t[1].len = count; spi_message_add_tail(&t[1], &m); - cmd[0] = KS8995_CMD_WRITE; - cmd[1] = offset; - mutex_lock(&ks->lock); err = spi_sync(ks->spi, &m); mutex_unlock(&ks->lock); -- cgit v1.2.3 From c0e6cb1fed5a7390ac84396f6f43e70fc2034ac6 Mon Sep 17 00:00:00 2001 From: Helmut Buchsbaum Date: Tue, 9 Feb 2016 20:47:17 +0100 Subject: net: phy: spi_ks8995: add support for MICREL KSZ8795CLX Add support for MICREL KSZ8795CLX Integrated 5-Port, 10-/100-Managed Ethernet Switch with Gigabit GMII/RGMII and MII/RMII interfaces. Signed-off-by: Helmut Buchsbaum Signed-off-by: David S. Miller --- drivers/net/phy/spi_ks8995.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 9dcc5b4fd9d1..5e7340f6b37c 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -77,6 +77,7 @@ #define KS8995_REGS_SIZE 0x80 #define KSZ8864_REGS_SIZE 0x100 +#define KSZ8795_REGS_SIZE 0x100 #define ID1_CHIPID_M 0xf #define ID1_CHIPID_S 4 @@ -85,9 +86,11 @@ #define ID1_START_SW 1 /* start the switch */ #define FAMILY_KS8995 0x95 +#define FAMILY_KSZ8795 0x87 #define CHIPID_M 0 #define KS8995_CHIP_ID 0x00 #define KSZ8864_CHIP_ID 0x01 +#define KSZ8795_CHIP_ID 0x09 #define KS8995_CMD_WRITE 0x02U #define KS8995_CMD_READ 0x03U @@ -97,6 +100,7 @@ enum ks8995_chip_variant { ks8995, ksz8864, + ksz8795, max_variant }; @@ -126,6 +130,14 @@ static const struct ks8995_chip_params ks8995_chip[] = { .addr_width = 8, .addr_shift = 0, }, + [ksz8795] = { + .name = "KSZ8795CLX", + .family_id = FAMILY_KSZ8795, + .chip_id = KSZ8795_CHIP_ID, + .regs_size = KSZ8795_REGS_SIZE, + .addr_width = 12, + .addr_shift = 1, + }, }; struct ks8995_pdata { @@ -145,6 +157,7 @@ struct ks8995_switch { static const struct spi_device_id ks8995_id[] = { {"ks8995", ks8995}, {"ksz8864", ksz8864}, + {"ksz8795", ksz8795}, { } }; MODULE_DEVICE_TABLE(spi, ks8995_id); @@ -358,6 +371,22 @@ static int ks8995_get_revision(struct ks8995_switch *ks) err = -ENODEV; } break; + case FAMILY_KSZ8795: + /* try reading chip id at CHIP ID1 */ + err = ks8995_read_reg(ks, KS8995_REG_ID1, &id1); + if (err) { + err = -EIO; + goto err_out; + } + + if (get_chip_id(id1) == ks->chip->chip_id) { + ks->revision_id = get_chip_rev(id1); + } else { + dev_err(&ks->spi->dev, "unsupported chip id for KSZ8795 family: 0x%02x\n", + id1); + err = -ENODEV; + } + break; default: dev_err(&ks->spi->dev, "unsupported family id: 0x%02x\n", id0); err = -ENODEV; -- cgit v1.2.3 From 8431706b4d1e684efee4b9e91e655f1161e883a8 Mon Sep 17 00:00:00 2001 From: Ivan Vecera Date: Thu, 11 Feb 2016 12:42:26 +0100 Subject: be2net: don't report EVB for older chipsets when SR-IOV is disabled The EVB (virtual bridge) functionality should be disabled on older BE3 and Lancer chips if SR-IOV is disabled in the NIC's BIOS. This setting is identified by the zero value of total VFs reported by the card. The GET_HSW_CONFIG command cannot be used as it is not supported by these older chipset's FW. 
v2: added the comment Cc: Sathya Perla Cc: Ajit Khaparde Cc: Padmanabh Ratnakar Cc: Sriharsha Basavapatna Cc: Somnath Kotur Signed-off-by: Ivan Vecera Acked-by: Sathya Perla Signed-off-by: David S. Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 4ecd8c2beaf5..88f427cb76c3 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4617,6 +4617,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, /* BE and Lancer chips support VEB mode only */ if (BEx_chip(adapter) || lancer_chip(adapter)) { + /* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */ + if (!pci_sriov_get_totalvfs(adapter->pdev)) + return 0; hsw_mode = PORT_FWD_TYPE_VEB; } else { status = be_cmd_get_hsw_config(adapter, NULL, 0, -- cgit v1.2.3 From 2f67864b6d5b653ee4518ab0d20549283b49a527 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Sun, 7 Feb 2016 11:47:17 -0600 Subject: net: phy: dp83848: Add macro for dp83848 compatible devices Add a helper macro for defining dp83848 compatible phy devices. Update copyright info. Signed-off-by: Andrew F. Davis Signed-off-by: David S. Miller --- drivers/net/phy/dp83848.c | 41 ++++++++++++++++++++++------------------- 1 file changed, 22 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 5e14e629c597..4e78f541b8c4 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -1,7 +1,7 @@ /* * Driver for the Texas Instruments DP83848 PHY * - * Copyright (C) 2015 Texas Instruments Inc. + * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -70,25 +70,28 @@ static struct mdio_device_id __maybe_unused dp83848_tbl[] = { }; MODULE_DEVICE_TABLE(mdio, dp83848_tbl); -static struct phy_driver dp83848_driver[] = { - { - .phy_id = DP83848_PHY_ID, - .phy_id_mask = 0xfffffff0, - .name = "TI DP83848", - .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, - - .soft_reset = genphy_soft_reset, - .config_init = genphy_config_init, - .suspend = genphy_suspend, - .resume = genphy_resume, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, +#define DP83848_PHY_DRIVER(_id, _name) \ + { \ + .phy_id = _id, \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_BASIC_FEATURES, \ + .flags = PHY_HAS_INTERRUPT, \ + \ + .soft_reset = genphy_soft_reset, \ + .config_init = genphy_config_init, \ + .suspend = genphy_suspend, \ + .resume = genphy_resume, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + \ + /* IRQ related */ \ + .ack_interrupt = dp83848_ack_interrupt, \ + .config_intr = dp83848_config_intr, \ + } - /* IRQ related */ - .ack_interrupt = dp83848_ack_interrupt, - .config_intr = dp83848_config_intr, - }, +static struct phy_driver dp83848_driver[] = { + DP83848_PHY_DRIVER(DP83848_PHY_ID, "TI DP83848 10/100 Mbps PHY"), }; module_phy_driver(dp83848_driver); -- cgit v1.2.3 From 68336293462c92ba420528dd293d54ee879b6e15 Mon Sep 17 00:00:00 2001 From: "Andrew F. 
Davis" Date: Sun, 7 Feb 2016 11:47:18 -0600 Subject: net: phy: dp83848: Add PHY ID for TI version of DP83848C After acquiring National Semiconductor, TI appears to have changed the Vendor Model Number for the DP83848C PHYs, add this new ID to supported IDs. Signed-off-by: Andrew F. Davis Signed-off-by: David S. Miller --- drivers/net/phy/dp83848.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 4e78f541b8c4..d4686d5f6e03 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -16,7 +16,8 @@ #include #include -#define DP83848_PHY_ID 0x20005c90 +#define TI_DP83848C_PHY_ID 0x20005ca0 +#define NS_DP83848C_PHY_ID 0x20005c90 /* Registers */ #define DP83848_MICR 0x11 @@ -65,7 +66,8 @@ static int dp83848_config_intr(struct phy_device *phydev) } static struct mdio_device_id __maybe_unused dp83848_tbl[] = { - { DP83848_PHY_ID, 0xfffffff0 }, + { TI_DP83848C_PHY_ID, 0xfffffff0 }, + { NS_DP83848C_PHY_ID, 0xfffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, dp83848_tbl); @@ -91,7 +93,8 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); } static struct phy_driver dp83848_driver[] = { - DP83848_PHY_DRIVER(DP83848_PHY_ID, "TI DP83848 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), }; module_phy_driver(dp83848_driver); -- cgit v1.2.3 From cf13be5afd30363e5e6e1232e004bae99ee3c623 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Sun, 7 Feb 2016 11:47:19 -0600 Subject: net: phy: dp83848: Reorganize code for readability and safety Reorganize code by moving the desired interrupt mask definition out of function. Also rearrange the enable/disable interrupt function to prevent accidental over-writing of values in registers. Signed-off-by: Andrew F. Davis Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83848.c | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index d4686d5f6e03..20d3b9dae444 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -37,6 +37,12 @@ #define DP83848_MISR_ED_INT_EN BIT(6) /* Energy detect */ #define DP83848_MISR_LQM_INT_EN BIT(7) /* Link Quality Monitor */ +#define DP83848_INT_EN_MASK \ + (DP83848_MISR_ANC_INT_EN | \ + DP83848_MISR_DUP_INT_EN | \ + DP83848_MISR_SPD_INT_EN | \ + DP83848_MISR_LINK_INT_EN) + static int dp83848_ack_interrupt(struct phy_device *phydev) { int err = phy_read(phydev, DP83848_MISR); @@ -46,23 +52,24 @@ static int dp83848_ack_interrupt(struct phy_device *phydev) static int dp83848_config_intr(struct phy_device *phydev) { - int err; + int control, ret; + + control = phy_read(phydev, DP83848_MICR); + if (control < 0) + return control; if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { - err = phy_write(phydev, DP83848_MICR, - DP83848_MICR_INT_OE | - DP83848_MICR_INTEN); - if (err < 0) - return err; - - return phy_write(phydev, DP83848_MISR, - DP83848_MISR_ANC_INT_EN | - DP83848_MISR_DUP_INT_EN | - DP83848_MISR_SPD_INT_EN | - DP83848_MISR_LINK_INT_EN); + control |= DP83848_MICR_INT_OE; + control |= DP83848_MICR_INTEN; + + ret = phy_write(phydev, DP83848_MISR, DP83848_INT_EN_MASK); + if (ret < 0) + return ret; + } else { + control &= ~DP83848_MICR_INTEN; } - return phy_write(phydev, DP83848_MICR, 0x0); + return phy_write(phydev, DP83848_MICR, control); } static struct mdio_device_id __maybe_unused dp83848_tbl[] = { -- cgit v1.2.3 From d1782f7b0cb853ec0a9b7b3e5f8a3252ed8a054e Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Sun, 7 Feb 2016 11:47:20 -0600 Subject: net: phy: dp83848: Add support for TI TLK10x Ethernet PHYs The TI TLK10x Ethernet PHYs are similar in the interrupt relevant registers and so are compatible with the DP83848x devices already supported. Signed-off-by: Andrew F. Davis Signed-off-by: David S. Miller --- drivers/net/phy/dp83848.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 20d3b9dae444..f897989d1bf7 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -18,6 +18,7 @@ #define TI_DP83848C_PHY_ID 0x20005ca0 #define NS_DP83848C_PHY_ID 0x20005c90 +#define TLK10X_PHY_ID 0x2000a210 /* Registers */ #define DP83848_MICR 0x11 @@ -75,6 +76,7 @@ static int dp83848_config_intr(struct phy_device *phydev) static struct mdio_device_id __maybe_unused dp83848_tbl[] = { { TI_DP83848C_PHY_ID, 0xfffffff0 }, { NS_DP83848C_PHY_ID, 0xfffffff0 }, + { TLK10X_PHY_ID, 0xfffffff0 }, { } }; MODULE_DEVICE_TABLE(mdio, dp83848_tbl); @@ -102,6 +104,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); static struct phy_driver dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), }; module_phy_driver(dp83848_driver); -- cgit v1.2.3 From 5fed039351dd54b295cba7c9afe34ede99d9b2e9 Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Sun, 7 Feb 2016 11:47:21 -0600 Subject: net: phy: dp83848: Add comments for register definitions Signed-off-by: Andrew F. Davis Signed-off-by: David S. 
Miller --- drivers/net/phy/dp83848.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index f897989d1bf7..556904f572d6 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -21,8 +21,8 @@ #define TLK10X_PHY_ID 0x2000a210 /* Registers */ -#define DP83848_MICR 0x11 -#define DP83848_MISR 0x12 +#define DP83848_MICR 0x11 /* MII Interrupt Control Register */ +#define DP83848_MISR 0x12 /* MII Interrupt Status Register */ /* MICR Register Fields */ #define DP83848_MICR_INT_OE BIT(0) /* Interrupt Output Enable */ -- cgit v1.2.3 From 0cf3ace9e7cb47e3173561a8fb2601a12d8f75d2 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Sun, 7 Feb 2016 21:52:24 +0100 Subject: virtio_net: validate ethtool port setting and explain the user validation We should validate the port setting that we got from the user and check if it's what we've set it to (PORT_OTHER), also add explanation that ignoring advertising is good as long as we don't have autonegotiation. Signed-off-by: Nikolay Aleksandrov Acked-by: Michael S. Tsirkin Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c9fd52a8e6ec..fb0eae42bf39 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1386,11 +1386,13 @@ static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd) struct ethtool_cmd diff1 = *cmd; struct ethtool_cmd diff2 = {}; - /* advertising and cmd are usually set, ignore port because we set it */ + /* cmd is always set so we need to clear it, validate the port type + * and also without autonegotiation we can ignore advertising + */ ethtool_cmd_speed_set(&diff1, 0); + diff2.port = PORT_OTHER; diff1.advertising = 0; diff1.duplex = 0; - diff1.port = 0; diff1.cmd = 0; return !memcmp(&diff1, &diff2, sizeof(diff1)); -- cgit v1.2.3 From a3a8749d34d8a5ac071c7ead792bd21ffe328aa0 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Mon, 8 Feb 2016 13:15:09 +0100 Subject: ixgbe: bulk free SKBs during TX completion cleanup cycle There is an opportunity to bulk free SKBs during reclaiming of resources after DMA transmit completes in ixgbe_clean_tx_irq. Thus, bulk freeing at this point does not introduce any added latency. Simply use napi_consume_skb() which were recently introduced. The napi_budget parameter is needed by napi_consume_skb() to detect if it is called from netpoll. Benchmarking IPv4-forwarding, on CPU i7-4790K @4.2GHz (no turbo boost) Single CPU/flow numbers: before: 1982144 pps -> after : 2064446 pps Improvement: +82302 pps, -20 nanosec, +4.1% (SLUB and GCC version 5.1.1 20150618 (Red Hat 5.1.1-4)) Joint work with Alexander Duyck. Signed-off-by: Alexander Duyck Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4003a88bbf6..0c701b8438b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1089,7 +1089,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) * @tx_ring: tx ring to clean **/ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, - struct ixgbe_ring *tx_ring) + struct ixgbe_ring *tx_ring, int napi_budget) { struct ixgbe_adapter *adapter = q_vector->adapter; struct ixgbe_tx_buffer *tx_buffer; @@ -1127,7 +1127,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, total_packets += tx_buffer->gso_segs; /* free the skb */ - dev_consume_skb_any(tx_buffer->skb); + napi_consume_skb(tx_buffer->skb, napi_budget); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -2784,7 +2784,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget) #endif ixgbe_for_each_ring(ring, q_vector->tx) - clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); + clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget); /* Exit if we are called by netpoll or busy polling is active */ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) -- cgit v1.2.3 From b57085019dcdd52d40ba153c6957f9e5a2f4b931 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 11 Feb 2016 20:57:17 +0000 Subject: net: vxlan: enable local checksum offload Signed-off-by: Edward Cree Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ebf57d90d295..9f52203ac860 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1702,10 +1702,8 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, if (csum_start <= VXLAN_MAX_REMCSUM_START && !(csum_start & VXLAN_RCO_SHIFT_MASK) && (skb->csum_offset == offsetof(struct udphdr, check) || - skb->csum_offset == offsetof(struct tcphdr, check))) { - udp_sum = false; + skb->csum_offset == offsetof(struct tcphdr, check))) type |= SKB_GSO_TUNNEL_REMCSUM; - } } min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len @@ -1723,7 +1721,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, if (WARN_ON(!skb)) return -ENOMEM; - skb = iptunnel_handle_offloads(skb, udp_sum, type); + skb = iptunnel_handle_offloads(skb, false, type); if (IS_ERR(skb)) return PTR_ERR(skb); -- cgit v1.2.3 From 6fa79666e24d32be1b709f5269af41ed9e829e7e Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Thu, 11 Feb 2016 21:02:31 +0000 Subject: net: ip_tunnel: remove 'csum_help' argument to iptunnel_handle_offloads All users now pass false, so we can remove it, and remove the code that was conditional upon it. Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 2 +- include/net/ip_tunnels.h | 3 +-- include/net/udp_tunnel.h | 3 +-- net/ipv4/fou.c | 4 ++-- net/ipv4/ip_gre.c | 3 +-- net/ipv4/ip_tunnel_core.c | 18 ++++++------------ net/ipv4/ipip.c | 2 +- net/ipv6/sit.c | 4 ++-- net/netfilter/ipvs/ip_vs_xmit.c | 6 ++---- 9 files changed, 17 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 9f52203ac860..0a23c64379d6 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1721,7 +1721,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, if (WARN_ON(!skb)) return -ENOMEM; - skb = iptunnel_handle_offloads(skb, false, type); + skb = iptunnel_handle_offloads(skb, type); if (IS_ERR(skb)) return PTR_ERR(skb); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 6db96ea0144f..bc439f32baa9 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -279,8 +279,7 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, gfp_t flags); -struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum, - int gso_type_mask); +struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, int gso_type_mask); static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len) { diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 734c15662ea9..97f5adb121a6 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -103,8 +103,7 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb, { int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - /* As we're a UDP tunnel, we support LCO, so don't need csum_help */ - return iptunnel_handle_offloads(skb, false, type); + return iptunnel_handle_offloads(skb, type); } static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff) diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index dac1874a5911..88dab0c1670c 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -787,7 +787,7 @@ int fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, SKB_GSO_UDP_TUNNEL; __be16 sport; - skb = iptunnel_handle_offloads(skb, false, type); + skb = iptunnel_handle_offloads(skb, type); if (IS_ERR(skb)) return PTR_ERR(skb); @@ -820,7 +820,7 @@ int gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, optlen += need_priv ? GUE_LEN_PRIV : 0; - skb = iptunnel_handle_offloads(skb, false, type); + skb = iptunnel_handle_offloads(skb, type); if (IS_ERR(skb)) return PTR_ERR(skb); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 9b31532d95f4..65748db44285 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -503,8 +503,7 @@ static void __gre_xmit(struct sk_buff *skb, struct net_device *dev, static struct sk_buff *gre_handle_offloads(struct sk_buff *skb, bool csum) { - return iptunnel_handle_offloads(skb, false, - csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE); + return iptunnel_handle_offloads(skb, csum ? 
SKB_GSO_GRE_CSUM : SKB_GSO_GRE); } static struct rtable *gre_get_rt(struct sk_buff *skb, diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index d74ce93de1fe..a6e58b6141cd 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -148,7 +148,6 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md, EXPORT_SYMBOL_GPL(iptunnel_metadata_reply); struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, - bool csum_help, int gso_type_mask) { int err; @@ -166,18 +165,13 @@ struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, return skb; } - /* If packet is not gso and we are not offloading inner checksum, - * clear encapsulation flag. This allows setting CHECKSUM_PARTIAL - * on the outer header without confusing devices that implement - * NETIF_F_IP_CSUM with encapsulation. - */ - if (skb->ip_summed == CHECKSUM_PARTIAL && csum_help) { - skb->encapsulation = 0; - err = skb_checksum_help(skb); - if (unlikely(err)) - goto error; - } else if (skb->ip_summed != CHECKSUM_PARTIAL) { + if (skb->ip_summed != CHECKSUM_PARTIAL) { skb->ip_summed = CHECKSUM_NONE; + /* We clear encapsulation here to prevent badly-written + * drivers potentially deciding to offload an inner checksum + * if we set CHECKSUM_PARTIAL on the outer header. + * This should go away when the drivers are all fixed. + */ skb->encapsulation = 0; } diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 4044da61e747..6ec5b42fd172 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -219,7 +219,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(skb->protocol != htons(ETH_P_IP))) goto tx_error; - skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP); + skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP); if (IS_ERR(skb)) goto out; diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 2066d1c25a11..9a6b407f5840 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -911,7 +911,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb, goto tx_error; } - skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT); + skb = iptunnel_handle_offloads(skb, SKB_GSO_SIT); if (IS_ERR(skb)) { ip_rt_put(rt); goto out; @@ -1000,7 +1000,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) struct ip_tunnel *tunnel = netdev_priv(dev); const struct iphdr *tiph = &tunnel->parms.iph; - skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP); + skb = iptunnel_handle_offloads(skb, SKB_GSO_IPIP); if (IS_ERR(skb)) goto out; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 3264cb49b333..a3f5cd9b3c4c 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -1019,8 +1019,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, if (IS_ERR(skb)) goto tx_error; - skb = iptunnel_handle_offloads( - skb, false, __tun_gso_type_mask(AF_INET, cp->af)); + skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET, cp->af)); if (IS_ERR(skb)) goto tx_error; @@ -1112,8 +1111,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, if (IS_ERR(skb)) goto tx_error; - skb = iptunnel_handle_offloads( - skb, false, __tun_gso_type_mask(AF_INET6, cp->af)); + skb = iptunnel_handle_offloads(skb, __tun_gso_type_mask(AF_INET6, cp->af)); if (IS_ERR(skb)) goto tx_error; -- cgit v1.2.3 From ceb27759987ec10ba22332bd7fdf1cfb35b86991 Mon Sep 17 00:00:00 2001 From: Shota Suzuki Date: Fri, 11 Dec 2015 18:43:59 +0900 Subject: igb: Remove unnecessary flag setting in 
igb_set_flag_queue_pairs() If VFs are enabled (max_vfs >= 1), both max_rss_queues and adapter->rss_queues are set to 2 in the case of e1000_82576. In this case, IGB_FLAG_QUEUE_PAIRS is always set in the default block as a result of fall-through, thus setting it in the e1000_82576 block is not necessary. Signed-off-by: Shota Suzuki Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 8 -------- 1 file changed, 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 31e5f3942839..eb24b403534f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2921,14 +2921,6 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, /* Device supports enough interrupts without queue pairing. */ break; case e1000_82576: - /* If VFs are going to be allocated with RSS queues then we - * should pair the queues in order to conserve interrupts due - * to limited supply. - */ - if ((adapter->rss_queues > 1) && - (adapter->vfs_allocated_count > 6)) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - /* fall through */ case e1000_82580: case e1000_i350: case e1000_i354: -- cgit v1.2.3 From 37a5d163fb447b39f7960d0534de30e88ad395bb Mon Sep 17 00:00:00 2001 From: Shota Suzuki Date: Fri, 11 Dec 2015 18:44:00 +0900 Subject: igb: Unpair the queues when changing the number of queues By the commit 72ddef0506da ("igb: Fix oops caused by missing queue pairing"), the IGB_FLAG_QUEUE_PAIRS flag can now be set when changing the number of queues by "ethtool -L", but it is never cleared unless the igb driver is reloaded. This patch clears it if queue pairing becomes unnecessary as a result of "ethtool -L". Signed-off-by: Shota Suzuki Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index eb24b403534f..85c47aa16a31 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2931,6 +2931,8 @@ void igb_set_flag_queue_pairs(struct igb_adapter *adapter, */ if (adapter->rss_queues > (max_rss_queues / 2)) adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + else + adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS; break; } } -- cgit v1.2.3 From 9ce0e8d72678b5b60c99ce4c7af15ec127c761cb Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 11 Dec 2015 16:45:38 -0800 Subject: igb/igbvf: don't give up The driver shouldn't just give up if it fails to get the hardware mailbox lock. This can happen in a situation where the PF-VF communication channel is heavily loaded and causes complete communications failure between the PF and VF drivers. Add a counter and a delay. The driver will now retry ten times, waiting one millisecond between retries. 
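With the values used here (10 retries with udelay(1000) between attempts) the lock-acquire path busy-waits for at most roughly 10 ms before falling back to the existing -E1000_ERR_MBX error return, so the change bounds the added latency rather than retrying indefinitely.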
Signed-off-by: Mitch Williams Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_mbx.c | 18 ++++++++++++------ drivers/net/ethernet/intel/igbvf/mbx.c | 20 +++++++++++++------- 2 files changed, 25 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c index 162cc49345d0..10f5c9e016a9 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mbx.c +++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c @@ -322,14 +322,20 @@ static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number) { s32 ret_val = -E1000_ERR_MBX; u32 p2v_mailbox; + int count = 10; - /* Take ownership of the buffer */ - wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); + do { + /* Take ownership of the buffer */ + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); - /* reserve mailbox for vf use */ - p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); - if (p2v_mailbox & E1000_P2VMAILBOX_PFU) - ret_val = 0; + /* reserve mailbox for vf use */ + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); + if (p2v_mailbox & E1000_P2VMAILBOX_PFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); return ret_val; } diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 7b6cb4c3764c..01752f44ace2 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -234,13 +234,19 @@ static s32 e1000_check_for_rst_vf(struct e1000_hw *hw) static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw) { s32 ret_val = -E1000_ERR_MBX; - - /* Take ownership of the buffer */ - ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); - - /* reserve mailbox for VF use */ - if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) - ret_val = E1000_SUCCESS; + int count = 10; + + do { + /* Take ownership of the buffer */ + ew32(V2PMAILBOX(0), E1000_V2PMAILBOX_VFU); + + /* reserve mailbox for VF use */ + if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU) { + ret_val = 0; + break; + } + udelay(1000); + } while (count-- > 0); return ret_val; } -- cgit v1.2.3 From c3278587e7d34cfbc1d38d3ae25923343af7752a Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:10:23 -0800 Subject: igb: clean up code for setting MAC address Drop a bunch of hand written byte swapping code in favor of just doing the byte swapping ourselves. The registers are little endian registers storing a big endian value so if we read the MAC address array as little endian then we will get the CPU registers into the proper layout. 
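As a rough standalone illustration of the endianness argument above; the MAC bytes are invented for the example, and the plain memcpy() load additionally assumes a little-endian host, which the driver avoids by using le32_to_cpup()/le16_to_cpup():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint32_t rar_low, rar_high16;

	/* The RAL/RAH registers hold the address as little-endian words, so a
	 * little-endian load of the byte array already has addr[0] in the low
	 * byte, which is exactly the layout the hardware expects.
	 */
	memcpy(&rar_low, addr, 4);		/* assumes a little-endian host */
	rar_high16 = addr[4] | (uint32_t)addr[5] << 8;

	printf("RAL = 0x%08" PRIx32 ", RAH[15:0] = 0x%04" PRIx32 "\n",
	       rar_low, rar_high16);
	return 0;
}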
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 85c47aa16a31..02f19e45d6fd 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7698,15 +7698,14 @@ static void igb_io_resume(struct pci_dev *pdev) static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, u8 qsel) { - u32 rar_low, rar_high; struct e1000_hw *hw = &adapter->hw; + u32 rar_low, rar_high; /* HW expects these in little endian so we reverse the byte order - * from network order (big endian) to little endian + * from network order (big endian) to CPU endian */ - rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | - ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); - rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + rar_low = le32_to_cpup((__be32 *)(addr)); + rar_high = le16_to_cpup((__be16 *)(addr + 4)); /* Indicate to hardware the Address is Valid. */ rar_high |= E1000_RAH_AV; -- cgit v1.2.3 From 832e821c51e381966464c8a0f30f12eb1514eba0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:10:30 -0800 Subject: igb: Refactor VFTA configuration This patch starts the clean-up process on the VFTA configuration. Specifically in this patch I attempt to address and simplify several items while also updating the code to bring it more inline with what is already in ixgbe. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 37 ++++++++-- drivers/net/ethernet/intel/igb/e1000_hw.h | 2 +- drivers/net/ethernet/intel/igb/e1000_mac.c | 102 +++++++++------------------ drivers/net/ethernet/intel/igb/e1000_mac.h | 2 +- 4 files changed, 67 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index adb33e2a0137..fff50523b440 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -34,6 +34,7 @@ #include "e1000_mac.h" #include "e1000_82575.h" #include "e1000_i210.h" +#include "igb.h" static s32 igb_get_invariants_82575(struct e1000_hw *); static s32 igb_acquire_phy_82575(struct e1000_hw *); @@ -71,6 +72,32 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw); static const u16 e1000_82580_rxpbs_table[] = { 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 }; +/* Due to a hw errata, if the host tries to configure the VFTA register + * while performing queries from the BMC or DMA, then the VFTA in some + * cases won't be written. + */ + +/** + * igb_write_vfta_i350 - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
+ **/ +static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) +{ + struct igb_adapter *adapter = hw->back; + int i; + + for (i = 10; i--;) + array_wr32(E1000_VFTA, offset, value); + + wrfl(); + adapter->shadow_vfta[offset] = value; +} + /** * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO * @hw: pointer to the HW structure @@ -429,6 +456,11 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) mac->ops.release_swfw_sync = igb_release_swfw_sync_82575; } + if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) + mac->ops.write_vfta = igb_write_vfta_i350; + else + mac->ops.write_vfta = igb_write_vfta; + /* Set if part includes ASF firmware */ mac->asf_firmware_present = true; /* Set if manageability features are enabled. */ @@ -1517,10 +1549,7 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw) /* Disabling VLAN filtering */ hw_dbg("Initializing the IEEE VLAN\n"); - if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) - igb_clear_vfta_i350(hw); - else - igb_clear_vfta(hw); + igb_clear_vfta(hw); /* Setup the receive address */ igb_init_rx_addrs(hw, rar_count); diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index 4034207eb5cc..f0c416e21d2c 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -325,7 +325,7 @@ struct e1000_mac_operations { s32 (*get_thermal_sensor_data)(struct e1000_hw *); s32 (*init_thermal_sensor_thresh)(struct e1000_hw *); #endif - + void (*write_vfta)(struct e1000_hw *, u32, u32); }; struct e1000_phy_operations { diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 2a88595f956c..97f6fae48d1d 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -92,10 +92,8 @@ void igb_clear_vfta(struct e1000_hw *hw) { u32 offset; - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - array_wr32(E1000_VFTA, offset, 0); - wrfl(); - } + for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;) + hw->mac.ops.write_vfta(hw, offset, 0); } /** @@ -107,54 +105,14 @@ void igb_clear_vfta(struct e1000_hw *hw) * Writes value at the given offset in the register array which stores * the VLAN filter table. **/ -static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) { + struct igb_adapter *adapter = hw->back; + array_wr32(E1000_VFTA, offset, value); wrfl(); -} - -/* Due to a hw errata, if the host tries to configure the VFTA register - * while performing queries from the BMC or DMA, then the VFTA in some - * cases won't be written. - */ - -/** - * igb_clear_vfta_i350 - Clear VLAN filter table - * @hw: pointer to the HW structure - * - * Clears the register array which contains the VLAN filter table by - * setting all the values to 0. - **/ -void igb_clear_vfta_i350(struct e1000_hw *hw) -{ - u32 offset; - int i; - - for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, 0); - - wrfl(); - } -} - -/** - * igb_write_vfta_i350 - Write value to VLAN filter table - * @hw: pointer to the HW structure - * @offset: register offset in VLAN filter table - * @value: register value written to VLAN filter table - * - * Writes value at the given offset in the register array which stores - * the VLAN filter table. 
- **/ -static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value) -{ - int i; - for (i = 0; i < 10; i++) - array_wr32(E1000_VFTA, offset, value); - - wrfl(); + adapter->shadow_vfta[offset] = value; } /** @@ -185,38 +143,42 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) /** * igb_vfta_set - enable or disable vlan in VLAN filter table * @hw: pointer to the HW structure - * @vid: VLAN id to add or remove - * @add: if true add filter, if false remove + * @vlan: VLAN id to add or remove + * @vlan_on: if true add filter, if false remove * * Sets or clears a bit in the VLAN filter table array based on VLAN id * and if we are adding or removing the filter **/ -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, bool vlan_on) { - u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; - u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); - u32 vfta; struct igb_adapter *adapter = hw->back; - s32 ret_val = 0; + u32 regidx, vfta_delta, vfta; + + if (vlan > 4095) + return E1000_ERR_PARAM; + + /* Part 1 + * The VFTA is a bitstring made up of 128 32-bit registers + * that enable the particular VLAN id, much like the MTA: + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = adapter->shadow_vfta[regidx]; - vfta = adapter->shadow_vfta[index]; + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update vfta using an XOR. + */ + vfta_delta &= vlan_on ? ~vfta : vfta; + vfta ^= vfta_delta; /* bit was set/cleared before we started */ - if ((!!(vfta & mask)) == add) { - ret_val = -E1000_ERR_CONFIG; - } else { - if (add) - vfta |= mask; - else - vfta &= ~mask; - } - if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354)) - igb_write_vfta_i350(hw, index, vfta); - else - igb_write_vfta(hw, index, vfta); - adapter->shadow_vfta[index] = vfta; + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); - return ret_val; + return 0; } /** diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index ea24961b0d70..4fbb953012d0 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -56,7 +56,7 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, void igb_clear_hw_cntrs_base(struct e1000_hw *hw); void igb_clear_vfta(struct e1000_hw *hw); -void igb_clear_vfta_i350(struct e1000_hw *hw); +void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); void igb_config_collision_dist(struct e1000_hw *hw); void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); -- cgit v1.2.3 From 45693bcb00cbd379c373ab22ccd9a9d4755cc7ed Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:10:39 -0800 Subject: igb: Allow asymmetric configuration of MTU versus Rx frame size Since the igb driver is using page based receive there is no point in limiting the Rx capabilities of the device. The driver can receive 9K jumbo frames at all times. The only changes needed due to MTU changes are updates for the FIFO sizes and flow-control watermarks. Update the maximum frame size to reflect the 9.5K limitation of the hardware, and replace all instances of max_frame_size with MAX_JUMBO_FRAME_SIZE when referring to an Rx FIFO or frame. 
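A simplified sketch of the reworked watermark arithmetic, using invented example numbers rather than values read from the hardware:

#include <stdio.h>

#define MAX_JUMBO_FRAME_SIZE 0x2600	/* 9728 bytes, the 9.5K hardware limit */

int main(void)
{
	unsigned int pba_kb = 34;	/* example Rx packet-buffer size in KB */
	unsigned int max_frame = 1522;	/* example MTU-derived Tx frame size */
	unsigned int hwm, high_water, low_water;

	/* Leave room below the top of the Rx FIFO for one in-flight Tx frame
	 * plus one full (jumbo-sized) Rx frame, as the reworked igb_reset()
	 * does, instead of scaling the watermark from the current MTU.
	 */
	hwm = (pba_kb << 10) - (max_frame + MAX_JUMBO_FRAME_SIZE);
	high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	low_water = high_water - 16;

	printf("high_water=%u low_water=%u\n", high_water, low_water);
	return 0;
}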
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_defines.h | 3 +- drivers/net/ethernet/intel/igb/igb_main.c | 107 +++++++++---------------- 2 files changed, 42 insertions(+), 68 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index c3c598c347a9..e9f23ee8f15e 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -356,7 +356,8 @@ /* Ethertype field values */ #define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ -#define MAX_JUMBO_FRAME_SIZE 0x3F00 +/* As per the EAS the maximum supported size is 9.5KB (9728 bytes) */ +#define MAX_JUMBO_FRAME_SIZE 0x2600 /* PBA constants */ #define E1000_PBA_34K 0x0022 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 02f19e45d6fd..b6768812aa01 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1862,7 +1862,7 @@ void igb_reset(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct e1000_mac_info *mac = &hw->mac; struct e1000_fc_info *fc = &hw->fc; - u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm; + u32 pba, hwm; /* Repartition Pba for greater than 9k mtu * To take effect CTRL.RST is required. @@ -1886,9 +1886,10 @@ void igb_reset(struct igb_adapter *adapter) break; } - if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) && - (mac->type < e1000_82576)) { - /* adjust PBA for jumbo frames */ + if (mac->type == e1000_82575) { + u32 min_rx_space, min_tx_space, needed_tx_space; + + /* write Rx PBA so that hardware can report correct Tx PBA */ wr32(E1000_PBA, pba); /* To maintain wire speed transmits, the Tx FIFO should be @@ -1898,31 +1899,26 @@ void igb_reset(struct igb_adapter *adapter) * one full receive packet and is similarly rounded up and * expressed in KB. */ - pba = rd32(E1000_PBA); - /* upper 16 bits has Tx packet buffer allocation size in KB */ - tx_space = pba >> 16; - /* lower 16 bits has Rx packet buffer allocation size in KB */ - pba &= 0xffff; - /* the Tx fifo also stores 16 bytes of information about the Tx - * but don't include ethernet FCS because hardware appends it + min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024); + + /* The Tx FIFO also stores 16 bytes of information about the Tx + * but don't include Ethernet FCS because hardware appends it. + * We only need to round down to the nearest 512 byte block + * count since the value we care about is 2 frames, not 1. */ - min_tx_space = (adapter->max_frame_size + - sizeof(union e1000_adv_tx_desc) - - ETH_FCS_LEN) * 2; - min_tx_space = ALIGN(min_tx_space, 1024); - min_tx_space >>= 10; - /* software strips receive CRC, so leave room for it */ - min_rx_space = adapter->max_frame_size; - min_rx_space = ALIGN(min_rx_space, 1024); - min_rx_space >>= 10; + min_tx_space = adapter->max_frame_size; + min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN; + min_tx_space = DIV_ROUND_UP(min_tx_space, 512); + + /* upper 16 bits has Tx packet buffer allocation size in KB */ + needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16); /* If current Tx allocation is less than the min Tx FIFO size, * and the min Tx FIFO size is less than the current Rx FIFO - * allocation, take space away from current Rx allocation + * allocation, take space away from current Rx allocation. 
*/ - if (tx_space < min_tx_space && - ((min_tx_space - tx_space) < pba)) { - pba = pba - (min_tx_space - tx_space); + if (needed_tx_space < pba) { + pba -= needed_tx_space; /* if short on Rx space, Rx wins and must trump Tx * adjustment @@ -1930,18 +1926,20 @@ void igb_reset(struct igb_adapter *adapter) if (pba < min_rx_space) pba = min_rx_space; } + + /* adjust PBA for jumbo frames */ wr32(E1000_PBA, pba); } - /* flow control settings */ - /* The high water mark must be low enough to fit one full frame - * (or the size used for early receive) above it in the Rx FIFO. - * Set it to the lower of: - * - 90% of the Rx FIFO size, or - * - the full Rx FIFO size minus one full frame + /* flow control settings + * The high water mark must be low enough to fit one full frame + * after transmitting the pause frame. As such we must have enough + * space to allow for us to complete our current transmit and then + * receive the frame that is in progress from the link partner. + * Set it to: + * - the full Rx FIFO size minus one full Tx plus one full Rx frame */ - hwm = min(((pba << 10) * 9 / 10), - ((pba << 10) - 2 * adapter->max_frame_size)); + hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE); fc->high_water = hwm & 0xFFFFFFF0; /* 16-byte granularity */ fc->low_water = fc->high_water - 16; @@ -3492,7 +3490,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) /* disable store bad packets and clear size bits. */ rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - /* enable LPE to prevent packets larger than max_frame_size */ + /* enable LPE to allow for reception of jumbo frames */ rctl |= E1000_RCTL_LPE; /* disable queue 0 to prevent tail write w/o re-config */ @@ -3548,32 +3546,6 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, return 0; } -/** - * igb_rlpml_set - set maximum receive packet size - * @adapter: board private structure - * - * Configure maximum receivable packet size. - **/ -static void igb_rlpml_set(struct igb_adapter *adapter) -{ - u32 max_frame_size = adapter->max_frame_size; - struct e1000_hw *hw = &adapter->hw; - u16 pf_id = adapter->vfs_allocated_count; - - if (pf_id) { - igb_set_vf_rlpml(adapter, max_frame_size, pf_id); - /* If we're in VMDQ or SR-IOV mode, then set global RLPML - * to our max jumbo frame size, in case we need to enable - * jumbo frames on one of the rings later. - * This will not pass over-length frames into the default - * queue because it's gated by the VMOLR.RLPML. - */ - max_frame_size = MAX_JUMBO_FRAME_SIZE; - } - - wr32(E1000_RLPML, max_frame_size); -} - static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn, bool aupe) { @@ -4067,7 +4039,14 @@ static void igb_set_rx_mode(struct net_device *netdev) vmolr |= rd32(E1000_VMOLR(vfn)) & ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); + + /* enable Rx jumbo frames, no need for restriction */ + vmolr &= ~E1000_VMOLR_RLPML_MASK; + vmolr |= MAX_JUMBO_FRAME_SIZE | E1000_VMOLR_LPE; + wr32(E1000_VMOLR(vfn), vmolr); + wr32(E1000_RLPML, MAX_JUMBO_FRAME_SIZE); + igb_restore_vf_multicasts(adapter); } @@ -7195,8 +7174,6 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) ctrl &= ~E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); } - - igb_rlpml_set(adapter); } static int igb_vlan_rx_add_vid(struct net_device *netdev, @@ -7952,9 +7929,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) * than the Rx threshold. Set hwm to PBA - max frame * size in 16B units, capping it at PBA - 6KB. 
*/ - hwm = 64 * pba - adapter->max_frame_size / 16; - if (hwm < 64 * (pba - 6)) - hwm = 64 * (pba - 6); + hwm = 64 * (pba - 6); reg = rd32(E1000_FCRTC); reg &= ~E1000_FCRTC_RTH_COAL_MASK; reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT) @@ -7964,9 +7939,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Set the DMA Coalescing Rx threshold to PBA - 2 * max * frame size, capping it at PBA - 10KB. */ - dmac_thr = pba - adapter->max_frame_size / 512; - if (dmac_thr < pba - 10) - dmac_thr = pba - 10; + dmac_thr = pba - 10; reg = rd32(E1000_DMACR); reg &= ~E1000_DMACR_DMACTHR_MASK; reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT) -- cgit v1.2.3 From d3836f8e2517fb04328c673989fd780030926694 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:10:47 -0800 Subject: igb: Do not factor VLANs into RLPML calculation The RLPML registers already take the size of VLAN headers into account when determining the maximum packet length. This is called out in EAS documents for several parts including the 82576 and the i350. As such we can drop the addition of size to the value programmed into the RLPML registers. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 1 - drivers/net/ethernet/intel/igb/igb_main.c | 43 ++----------------------------- 2 files changed, 2 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index e3cb93bdb21a..d135261f8602 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -95,7 +95,6 @@ struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; u16 num_vf_mc_hashes; - u16 vlans_enabled; u32 flags; unsigned long last_nack; u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index b6768812aa01..9461480e4eff 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3531,12 +3531,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, struct e1000_hw *hw = &adapter->hw; u32 vmolr; - /* if it isn't the PF check to see if VFs are enabled and - * increase the size to support vlan tags - */ - if (vfn < adapter->vfs_allocated_count && - adapter->vf_data[vfn].vlans_enabled) - size += VLAN_TAG_SIZE; + if (size > MAX_JUMBO_FRAME_SIZE) + size = MAX_JUMBO_FRAME_SIZE; vmolr = rd32(E1000_VMOLR(vfn)); vmolr &= ~E1000_VMOLR_RLPML_MASK; @@ -5787,8 +5783,6 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) wr32(E1000_VLVF(i), reg); } - - adapter->vf_data[vf].vlans_enabled = 0; } static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) @@ -5837,23 +5831,6 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) reg &= ~E1000_VLVF_VLANID_MASK; reg |= vid; wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size += 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } - - adapter->vf_data[vf].vlans_enabled++; } } else { if (i < E1000_VLVF_ARRAY_SIZE) { @@ -5865,22 +5842,6 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) igb_vfta_set(hw, vid, false); } wr32(E1000_VLVF(i), reg); - - /* do not modify RLPML for PF devices */ - if (vf >= adapter->vfs_allocated_count) - return 0; - - adapter->vf_data[vf].vlans_enabled--; - if (!adapter->vf_data[vf].vlans_enabled) { - u32 size; - - reg = rd32(E1000_VMOLR(vf)); - size = reg & E1000_VMOLR_RLPML_MASK; - size -= 4; - reg &= ~E1000_VMOLR_RLPML_MASK; - reg |= size; - wr32(E1000_VMOLR(vf), reg); - } } } return 0; -- cgit v1.2.3 From 5982a5565a08abf3b9ff18941b3e3cc94f7c8286 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:10:54 -0800 Subject: igb: Always enable VLAN 0 even if 8021q is not loaded This patch makes it so that we always add VLAN 0. This is important as we need to guarantee the PF can receive untagged frames in the case of SR-IOV being enabled but VLAN filtering not being enabled in the kernel. 
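A small userspace sketch of the restore order this patch establishes; the bool array is only a stand-in for the adapter's active_vlans bitmap:

#include <stdbool.h>
#include <stdio.h>

#define VLAN_N_VID 4096

int main(void)
{
	static bool active_vlans[VLAN_N_VID];	/* stand-in for the adapter bitmap */
	int vid;

	active_vlans[10] = true;		/* pretend VLAN 10 was configured */

	/* VLAN 0 is always re-added so untagged frames keep flowing even when
	 * 8021q never registered it; the remaining VIDs are then restored
	 * starting from 1, mirroring the for_each_set_bit_from() loop.
	 */
	printf("add VID 0\n");
	for (vid = 1; vid < VLAN_N_VID; vid++)
		if (active_vlans[vid])
			printf("add VID %d\n", vid);

	return 0;
}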
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9461480e4eff..f3e17385f737 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7177,11 +7177,12 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, static void igb_restore_vlan(struct igb_adapter *adapter) { - u16 vid; + u16 vid = 1; igb_vlan_mode(adapter->netdev, adapter->netdev->features); + igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); - for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); } -- cgit v1.2.3 From 8b77c6b20f32511175dfd00322ae82fb31949d55 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:11:04 -0800 Subject: igb: Merge VLVF configuration into igb_vfta_set This change makes it so that we can merge the configuration of the VLVF registers into the setting of the VFTA register. By doing this we simplify the logic and make use of similar functionality that we have already added for ixgbe making it easier to maintain both drivers. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_mac.c | 119 ++++++++++++++++++++++++++++- drivers/net/ethernet/intel/igb/e1000_mac.h | 3 +- drivers/net/ethernet/intel/igb/igb_main.c | 105 +++++-------------------- 3 files changed, 135 insertions(+), 92 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index 97f6fae48d1d..07cf4fe58338 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -140,22 +140,70 @@ void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) hw->mac.ops.rar_set(hw, mac_addr, i); } +/** + * igb_find_vlvf_slot - find the VLAN id or the first empty slot + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter + * @vlvf_bypass: skip VLVF if no match is found + * + * return the VLVF index where this VLAN id should be placed + * + **/ +static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass) +{ + s32 regindex, first_empty_slot; + u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0; + + /* Search for the VLAN id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) { + bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK; + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) + first_empty_slot = regindex; + } + + return first_empty_slot ? 
: -E1000_ERR_NO_SPACE; +} + /** * igb_vfta_set - enable or disable vlan in VLAN filter table * @hw: pointer to the HW structure * @vlan: VLAN id to add or remove + * @vind: VMDq output index that maps queue to VLAN id * @vlan_on: if true add filter, if false remove * * Sets or clears a bit in the VLAN filter table array based on VLAN id * and if we are adding or removing the filter **/ -s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, bool vlan_on) +s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) { struct igb_adapter *adapter = hw->back; - u32 regidx, vfta_delta, vfta; + u32 regidx, vfta_delta, vfta, bits; + s32 vlvf_index; - if (vlan > 4095) - return E1000_ERR_PARAM; + if ((vlan > 4095) || (vind > 7)) + return -E1000_ERR_PARAM; + + /* this is a 2 part operation - first the VFTA, then the + * VLVF and VLVFB if VT Mode is set + * We don't write the VFTA until we know the VLVF part succeeded. + */ /* Part 1 * The VFTA is a bitstring made up of 128 32-bit registers @@ -174,6 +222,69 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, bool vlan_on) vfta_delta &= vlan_on ? ~vfta : vfta; vfta ^= vfta_delta; + /* Part 2 + * If VT Mode is set + * Either vlan_on + * make sure the VLAN is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ + if (!adapter->vfs_allocated_count) + goto vfta_update; + + vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) { + if (vlvf_bypass) + goto vfta_update; + return vlvf_index; + } + + bits = rd32(E1000_VLVF(vlvf_index)); + + /* set the pool bit */ + bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind); + + if (!(bits & E1000_VLVF_POOLSEL_MASK)) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool + */ + if (vfta_delta) + hw->mac.ops.write_vfta(hw, regidx, vfta); + + /* disable VLVF and clear remaining bit from pool */ + wr32(E1000_VLVF(vlvf_index), 0); + + return 0; + } + + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
+ */ + vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE); + +vfta_update: /* bit was set/cleared before we started */ if (vfta_delta) hw->mac.ops.write_vfta(hw, regidx, vfta); diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h index 4fbb953012d0..90c8893c3eed 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.h +++ b/drivers/net/ethernet/intel/igb/e1000_mac.h @@ -57,7 +57,8 @@ s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, void igb_clear_hw_cntrs_base(struct e1000_hw *hw); void igb_clear_vfta(struct e1000_hw *hw); void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); -s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); +s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, u32 vind, + bool vlan_on, bool vlvf_bypass); void igb_config_collision_dist(struct e1000_hw *hw); void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); void igb_mta_set(struct e1000_hw *hw, u32 hash_value); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f3e17385f737..e7c3a9451b3f 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1534,12 +1534,13 @@ static void igb_irq_enable(struct igb_adapter *adapter) static void igb_update_mng_vlan(struct igb_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + u16 pf_id = adapter->vfs_allocated_count; u16 vid = adapter->hw.mng_cookie.vlan_id; u16 old_vid = adapter->mng_vlan_id; if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { /* add VID to filter table */ - igb_vfta_set(hw, vid, true); + igb_vfta_set(hw, vid, pf_id, true, true); adapter->mng_vlan_id = vid; } else { adapter->mng_vlan_id = IGB_MNG_VLAN_NONE; @@ -1549,7 +1550,7 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter) (vid != old_vid) && !test_bit(old_vid, adapter->active_vlans)) { /* remove VID from filter table */ - igb_vfta_set(hw, old_vid, false); + igb_vfta_set(hw, vid, pf_id, false, true); } } @@ -5778,75 +5779,13 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) (reg & E1000_VLVF_VLANID_ENABLE)) { reg = 0; vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, false); + igb_vfta_set(hw, vid, vf, false, true); } wr32(E1000_VLVF(i), reg); } } -static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - u32 reg, i; - - /* The vlvf table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return -1; - - /* we only need to do this if VMDq is enabled */ - if (!adapter->vfs_allocated_count) - return -1; - - /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) - break; - } - - if (add) { - if (i == E1000_VLVF_ARRAY_SIZE) { - /* Did not find a matching VLAN ID entry that was - * enabled. Search for a free filter entry, i.e. 
- * one without the enable bit set - */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if (!(reg & E1000_VLVF_VLANID_ENABLE)) - break; - } - } - if (i < E1000_VLVF_ARRAY_SIZE) { - /* Found an enabled/available entry */ - reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - - /* if !enabled we need to set this up in vfta */ - if (!(reg & E1000_VLVF_VLANID_ENABLE)) { - /* add VID to filter table */ - igb_vfta_set(hw, vid, true); - reg |= E1000_VLVF_VLANID_ENABLE; - } - reg &= ~E1000_VLVF_VLANID_MASK; - reg |= vid; - wr32(E1000_VLVF(i), reg); - } - } else { - if (i < E1000_VLVF_ARRAY_SIZE) { - /* remove vf from the pool */ - reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK)) { - reg = 0; - igb_vfta_set(hw, vid, false); - } - wr32(E1000_VLVF(i), reg); - } - } - return 0; -} - static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) { struct e1000_hw *hw = &adapter->hw; @@ -5860,13 +5799,14 @@ static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) { - int err = 0; struct igb_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err = 0; if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) return -EINVAL; if (vlan || qos) { - err = igb_vlvf_set(adapter, vlan, !!vlan, vf); + err = igb_vfta_set(hw, vlan, vf, !!vlan, false); if (err) goto out; igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); @@ -5882,8 +5822,8 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev, "Bring the PF device up before attempting to use the VF device.\n"); } } else { - igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, - false, vf); + igb_vfta_set(hw, adapter->vf_data[vf].pf_vlan, vf, + false, false); igb_set_vmvir(adapter, vlan, vf); igb_set_vmolr(adapter, vf, true); adapter->vf_data[vf].pf_vlan = 0; @@ -5924,12 +5864,12 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) * the VLAN filter set. 
*/ if (add && (adapter->netdev->flags & IFF_PROMISC)) - err = igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); + err = igb_vfta_set(hw, vid, adapter->vfs_allocated_count, + true, false); if (err) goto out; - err = igb_vlvf_set(adapter, vid, add, vf); + err = igb_vfta_set(hw, vid, vf, !!add, false); if (err) goto out; @@ -5956,8 +5896,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) if ((vlvf & VLAN_VID_MASK) == vid && !test_bit(vid, adapter->active_vlans) && !bits) - igb_vlvf_set(adapter, vid, add, - adapter->vfs_allocated_count); + igb_vfta_set(hw, vid, adapter->vfs_allocated_count, + false, false); } out: @@ -7144,12 +7084,8 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - /* attempt to add filter to vlvf array */ - igb_vlvf_set(adapter, vid, true, pf_id); - /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, true); - + igb_vfta_set(hw, vid, pf_id, true, true); set_bit(vid, adapter->active_vlans); return 0; @@ -7159,16 +7095,11 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) { struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; int pf_id = adapter->vfs_allocated_count; - s32 err; - - /* remove vlan from VLVF table array */ - err = igb_vlvf_set(adapter, vid, false, pf_id); + struct e1000_hw *hw = &adapter->hw; - /* if vid was not present in VLVF just remove it from table */ - if (err) - igb_vfta_set(hw, vid, false); + /* remove VID from filter table */ + igb_vfta_set(hw, vid, pf_id, false, true); clear_bit(vid, adapter->active_vlans); -- cgit v1.2.3 From a15d92598a5a741037b873fd4a43595a63048bbd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:11:11 -0800 Subject: igb: Clean-up configuration of VF port VLANs This patch is meant to clean-up the configuration of the VF port based VLAN configuration. The original logic was a bit muddled and had some undesirable side effects such as VLANs being either completely stripped from the port or VLANs being left when they shouldn't be. The idea behind this code is to avoid any events such as spurious spoof notifications when we are removing one VLAN tag and replacing it with another. 
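The ordering can be sketched roughly as below; the helper is a hypothetical stand-in for igb_set_vf_vlan() and friends, shown only to illustrate granting the new VLAN before revoking the old one:

#include <stdio.h>

/* Invented stand-in for the per-VF VLAN filter update. */
static int vf_vlan_filter(int vlan, int on)
{
	printf("VLAN %4d %s\n", vlan, on ? "enabled" : "disabled");
	return 0;
}

static int enable_port_vlan(int *pf_vlan, int new_vlan)
{
	/* grant access to the new VLAN first ... */
	int err = vf_vlan_filter(new_vlan, 1);

	if (err)
		return err;

	/* ... and only then revoke the previous one, so the VF never sits in
	 * a window with no VLAN access (the source of the spurious events).
	 */
	if (*pf_vlan && *pf_vlan != new_vlan)
		vf_vlan_filter(*pf_vlan, 0);

	*pf_vlan = new_vlan;
	return 0;
}

int main(void)
{
	int pf_vlan = 100;

	return enable_port_vlan(&pf_vlan, 200);
}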
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 181 ++++++++++++++++++------------ 1 file changed, 110 insertions(+), 71 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e7c3a9451b3f..6876ae5e18a9 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5786,53 +5786,6 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) } } -static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) -{ - struct e1000_hw *hw = &adapter->hw; - - if (vid) - wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); - else - wr32(E1000_VMVIR(vf), 0); -} - -static int igb_ndo_set_vf_vlan(struct net_device *netdev, - int vf, u16 vlan, u8 qos) -{ - struct igb_adapter *adapter = netdev_priv(netdev); - struct e1000_hw *hw = &adapter->hw; - int err = 0; - - if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) - return -EINVAL; - if (vlan || qos) { - err = igb_vfta_set(hw, vlan, vf, !!vlan, false); - if (err) - goto out; - igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); - igb_set_vmolr(adapter, vf, !vlan); - adapter->vf_data[vf].pf_vlan = vlan; - adapter->vf_data[vf].pf_qos = qos; - dev_info(&adapter->pdev->dev, - "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_warn(&adapter->pdev->dev, - "The VF VLAN has been set, but the PF device is not up.\n"); - dev_warn(&adapter->pdev->dev, - "Bring the PF device up before attempting to use the VF device.\n"); - } - } else { - igb_vfta_set(hw, adapter->vf_data[vf].pf_vlan, vf, - false, false); - igb_set_vmvir(adapter, vlan, vf); - igb_set_vmolr(adapter, vf, true); - adapter->vf_data[vf].pf_vlan = 0; - adapter->vf_data[vf].pf_qos = 0; - } -out: - return err; -} - static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) { struct e1000_hw *hw = &adapter->hw; @@ -5853,23 +5806,25 @@ static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) return i; } -static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, + bool add, u32 vf) { + int pf_id = adapter->vfs_allocated_count; struct e1000_hw *hw = &adapter->hw; - int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; - int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); - int err = 0; + int err; - /* If in promiscuous mode we need to make sure the PF also has - * the VLAN filter set. + /* If VLAN overlaps with one the PF is currently monitoring make + * sure that we are able to allocate a VLVF entry. This may be + * redundant but it guarantees PF will maintain visibility to + * the VLAN. 
*/ - if (add && (adapter->netdev->flags & IFF_PROMISC)) - err = igb_vfta_set(hw, vid, adapter->vfs_allocated_count, - true, false); - if (err) - goto out; + if (add && (adapter->netdev->flags & IFF_PROMISC)) { + err = igb_vfta_set(hw, vid, pf_id, true, false); + if (err) + return err; + } - err = igb_vfta_set(hw, vid, vf, !!add, false); + err = igb_vfta_set(hw, vid, vf, add, false); if (err) goto out; @@ -5904,23 +5859,107 @@ out: return err; } -static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf) { - /* clear flags - except flag that indicates PF has set the MAC */ - adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC; - adapter->vf_data[vf].last_nack = jiffies; + struct e1000_hw *hw = &adapter->hw; - /* reset offloads to defaults */ + if (vid) + wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT)); + else + wr32(E1000_VMVIR(vf), 0); +} + +static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, + u16 vlan, u8 qos) +{ + int err; + + err = igb_set_vf_vlan(adapter, vlan, true, vf); + if (err) + return err; + + igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vlan); + + /* revoke access to previous VLAN */ + if (vlan != adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = vlan; + adapter->vf_data[vf].pf_qos = qos; + dev_info(&adapter->pdev->dev, + "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IGB_DOWN, &adapter->state)) { + dev_warn(&adapter->pdev->dev, + "The VF VLAN has been set, but the PF device is not up.\n"); + dev_warn(&adapter->pdev->dev, + "Bring the PF device up before attempting to use the VF device.\n"); + } + + return err; +} + +static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) +{ + /* Restore tagless access via VLAN 0 */ + igb_set_vf_vlan(adapter, 0, true, vf); + + igb_set_vmvir(adapter, 0, vf); igb_set_vmolr(adapter, vf, true); + /* Remove any PF assigned VLAN */ + if (adapter->vf_data[vf].pf_vlan) + igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan, + false, vf); + + adapter->vf_data[vf].pf_vlan = 0; + adapter->vf_data[vf].pf_qos = 0; + + return 0; +} + +static int igb_ndo_set_vf_vlan(struct net_device *netdev, + int vf, u16 vlan, u8 qos) +{ + struct igb_adapter *adapter = netdev_priv(netdev); + + if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7)) + return -EINVAL; + + return (vlan || qos) ? 
igb_enable_port_vlan(adapter, vf, vlan, qos) : + igb_disable_port_vlan(adapter, vf); +} + +static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) +{ + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + + if (adapter->vf_data[vf].pf_vlan) + return -1; + + /* VLAN 0 is a special case, don't allow it to be removed */ + if (!vid && !add) + return 0; + + return igb_set_vf_vlan(adapter, vid, !!add, vf); +} + +static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) +{ + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; + + /* clear flags - except flag that indicates PF has set the MAC */ + vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC; + vf_data->last_nack = jiffies; + /* reset vlans for device */ igb_clear_vf_vfta(adapter, vf); - if (adapter->vf_data[vf].pf_vlan) - igb_ndo_set_vf_vlan(adapter->netdev, vf, - adapter->vf_data[vf].pf_vlan, - adapter->vf_data[vf].pf_qos); - else - igb_clear_vf_vfta(adapter, vf); + igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf); + igb_set_vmvir(adapter, vf_data->pf_vlan | + (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); + igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); /* reset multicast table array for vf */ adapter->vf_data[vf].num_vf_mc_hashes = 0; @@ -6065,7 +6104,7 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n", vf); else - retval = igb_set_vf_vlan(adapter, msgbuf, vf); + retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf); break; default: dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); -- cgit v1.2.3 From 16903caa339961b9f8a68b64f4f313789de48599 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:11:18 -0800 Subject: igb: Add support for VLAN promiscuous with SR-IOV and NTUPLE This change fixes things so that we can fully support SR-IOV or the recently added NTUPLE filtering while allowing support for VLAN promiscuous mode. By making this change we are able to support possible scenarios such as SR-IOV with the PF connected to a Linux bridge hosting other VMs. 
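A rough userspace sketch of the enable/disable idea (set every VFTA bit for VLAN promiscuous mode, rebuild from the active VLAN bitmap when leaving it); shadow_vfta here is an invented stand-in and the VLVF pool handling from the patch is omitted:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VFTA_ENTRIES 128		/* 128 x 32 bits covers VLAN IDs 0-4095 */

static uint32_t shadow_vfta[VFTA_ENTRIES];	/* invented userspace stand-in */

static void vlan_promisc_enable(void)
{
	/* admit every VLAN ID by setting all bits in the filter table */
	memset(shadow_vfta, 0xff, sizeof(shadow_vfta));
}

static void vlan_promisc_disable(const uint8_t *active_vlans)
{
	int vid;

	/* rebuild the table from the VLANs the stack actually configured */
	memset(shadow_vfta, 0, sizeof(shadow_vfta));
	for (vid = 0; vid < VFTA_ENTRIES * 32; vid++)
		if (active_vlans[vid / 8] & (1u << (vid % 8)))
			shadow_vfta[vid / 32] |= 1u << (vid % 32);
}

int main(void)
{
	uint8_t active[VFTA_ENTRIES * 4] = { 0 };	/* 4096-bit bitmap */

	active[10 / 8] |= 1u << (10 % 8);	/* pretend VLAN 10 is configured */
	vlan_promisc_enable();
	vlan_promisc_disable(active);
	printf("VFTA[0] = 0x%08" PRIx32 "\n", shadow_vfta[0]);
	return 0;
}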
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 1 + drivers/net/ethernet/intel/igb/igb_main.c | 313 +++++++++++++++++++++++------- 2 files changed, 242 insertions(+), 72 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index d135261f8602..707ae5c297ea 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -481,6 +481,7 @@ struct igb_adapter { #define IGB_FLAG_MAS_ENABLE (1 << 12) #define IGB_FLAG_HAS_MSIX (1 << 13) #define IGB_FLAG_EEE (1 << 14) +#define IGB_FLAG_VLAN_PROMISC BIT(15) /* Media Auto Sense */ #define IGB_MAS_ENABLE_0 0X0001 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 6876ae5e18a9..7366d4ff0400 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1819,6 +1819,10 @@ void igb_down(struct igb_adapter *adapter) if (!pci_channel_offline(adapter->pdev)) igb_reset(adapter); + + /* clear VLAN promisc flag so VFTA will be updated if necessary */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + igb_clean_all_tx_rings(adapter); igb_clean_all_rx_rings(adapter); #ifdef CONFIG_IGB_DCA @@ -2050,7 +2054,7 @@ static int igb_set_features(struct net_device *netdev, if (changed & NETIF_F_HW_VLAN_CTAG_RX) igb_vlan_mode(netdev, features); - if (!(changed & NETIF_F_RXALL)) + if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; netdev->features = features; @@ -3515,8 +3519,7 @@ void igb_setup_rctl(struct igb_adapter *adapter) E1000_RCTL_BAM | /* RX All Bcast Pkts */ E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ - rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ - E1000_RCTL_DPF | /* Allow filtered pause */ + rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */ E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ /* Do not mess with E1000_CTRL_VME, it affects transmit as well, * and that breaks VLANs. 
@@ -3967,6 +3970,130 @@ static int igb_write_uc_addr_list(struct net_device *netdev) return count; } +static int igb_vlan_promisc_enable(struct igb_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, pf_id; + + switch (hw->mac.type) { + case e1000_i210: + case e1000_i211: + case e1000_i350: + /* VLAN filtering needed for VLAN prio filter */ + if (adapter->netdev->features & NETIF_F_NTUPLE) + break; + /* fall through */ + case e1000_82576: + case e1000_82580: + case e1000_i354: + /* VLAN filtering needed for pool filtering */ + if (adapter->vfs_allocated_count) + break; + /* fall through */ + default: + return 1; + } + + /* We are already in VLAN promisc, nothing to do */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + return 0; + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + /* Add PF to all active pools */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + vlvf |= 1 << pf_id; + wr32(E1000_VLVF(i), vlvf); + } + +set_vfta: + /* Set all bits in the VLAN filter table array */ + for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;) + hw->mac.ops.write_vfta(hw, i, ~0U); + + /* Set flag so we don't redo unnecessary work */ + adapter->flags |= IGB_FLAG_VLAN_PROMISC; + + return 0; +} + +#define VFTA_BLOCK_SIZE 8 +static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset) +{ + struct e1000_hw *hw = &adapter->hw; + u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; + u32 vid_start = vfta_offset * 32; + u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); + u32 i, vid, word, bits, pf_id; + + /* guarantee that we don't scrub out management VLAN */ + vid = adapter->mng_vlan_id; + if (vid >= vid_start && vid < vid_end) + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + if (!adapter->vfs_allocated_count) + goto set_vfta; + + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + + for (i = E1000_VLVF_ARRAY_SIZE; --i;) { + u32 vlvf = rd32(E1000_VLVF(i)); + + /* pull VLAN ID from VLVF */ + vid = vlvf & VLAN_VID_MASK; + + /* only concern ourselves with a certain range */ + if (vid < vid_start || vid >= vid_end) + continue; + + if (vlvf & E1000_VLVF_VLANID_ENABLE) { + /* record VLAN ID in VFTA */ + vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); + + /* if PF is part of this then continue */ + if (test_bit(vid, adapter->active_vlans)) + continue; + } + + /* remove PF from the pool */ + bits = ~(1 << pf_id); + bits &= rd32(E1000_VLVF(i)); + wr32(E1000_VLVF(i), bits); + } + +set_vfta: + /* extract values from active_vlans and write back to VFTA */ + for (i = VFTA_BLOCK_SIZE; i--;) { + vid = (vfta_offset + i) * 32; + word = vid / BITS_PER_LONG; + bits = vid % BITS_PER_LONG; + + vfta[i] |= adapter->active_vlans[word] >> bits; + + hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]); + } +} + +static void igb_vlan_promisc_disable(struct igb_adapter *adapter) +{ + u32 i; + + /* We are not in VLAN promisc, nothing to do */ + if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + return; + + /* Set flag so we don't redo unnecessary work */ + adapter->flags &= ~IGB_FLAG_VLAN_PROMISC; + + for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE) + igb_scrub_vfta(adapter, i); +} + /** * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set * @netdev: network interface device structure @@ -3981,21 +4108,13 @@ static void igb_set_rx_mode(struct net_device *netdev) struct igb_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; unsigned int vfn = 
adapter->vfs_allocated_count; - u32 rctl, vmolr = 0; + u32 rctl = 0, vmolr = 0; int count; /* Check for Promiscuous and All Multicast modes */ - rctl = rd32(E1000_RCTL); - - /* clear the effected bits */ - rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE); - if (netdev->flags & IFF_PROMISC) { - /* retain VLAN HW filtering if in VT mode */ - if (adapter->vfs_allocated_count) - rctl |= E1000_RCTL_VFE; - rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); - vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME); + rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; + vmolr |= E1000_VMOLR_ROPE | E1000_VMOLR_MPME; } else { if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; @@ -4022,8 +4141,24 @@ static void igb_set_rx_mode(struct net_device *netdev) rctl |= E1000_RCTL_UPE; vmolr |= E1000_VMOLR_ROPE; } - rctl |= E1000_RCTL_VFE; } + + /* enable VLAN filtering by default */ + rctl |= E1000_RCTL_VFE; + + /* disable VLAN filtering for modes that require it */ + if ((netdev->flags & IFF_PROMISC) || + (netdev->features & NETIF_F_RXALL)) { + /* if we fail to set all rules then just clear VFE */ + if (igb_vlan_promisc_enable(adapter)) + rctl &= ~E1000_RCTL_VFE; + } else { + igb_vlan_promisc_disable(adapter); + } + + /* update state of unicast, multicast, and VLAN filtering modes */ + rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE | + E1000_RCTL_VFE); wr32(E1000_RCTL, rctl); /* In order to support SR-IOV and eventually VMDq it is necessary to set @@ -5762,48 +5897,98 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter) static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; - u32 pool_mask, reg, vid; - int i; + u32 pool_mask, vlvf_mask, i; + + /* create mask for VF and other pools */ + pool_mask = E1000_VLVF_POOLSEL_MASK; + vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); - pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); + /* drop PF from pool bits */ + pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + + adapter->vfs_allocated_count)); /* Find the vlan filter for this id */ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); + for (i = E1000_VLVF_ARRAY_SIZE; i--;) { + u32 vlvf = rd32(E1000_VLVF(i)); + u32 vfta_mask, vid, vfta; /* remove the vf from the pool */ - reg &= ~pool_mask; - - /* if pool is empty then remove entry from vfta */ - if (!(reg & E1000_VLVF_POOLSEL_MASK) && - (reg & E1000_VLVF_VLANID_ENABLE)) { - reg = 0; - vid = reg & E1000_VLVF_VLANID_MASK; - igb_vfta_set(hw, vid, vf, false, true); - } + if (!(vlvf & vlvf_mask)) + continue; + + /* clear out bit from VLVF */ + vlvf ^= vlvf_mask; + + /* if other pools are present, just remove ourselves */ + if (vlvf & pool_mask) + goto update_vlvfb; + + /* if PF is present, leave VFTA */ + if (vlvf & E1000_VLVF_POOLSEL_MASK) + goto update_vlvf; + + vid = vlvf & E1000_VLVF_VLANID_MASK; + vfta_mask = 1 << (vid % 32); - wr32(E1000_VLVF(i), reg); + /* clear bit from VFTA */ + vfta = adapter->shadow_vfta[vid / 32]; + if (vfta & vfta_mask) + hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask); +update_vlvf: + /* clear pool selection enable */ + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + vlvf &= E1000_VLVF_POOLSEL_MASK; + else + vlvf = 0; +update_vlvfb: + /* clear pool bits */ + wr32(E1000_VLVF(i), vlvf); } } -static int igb_find_vlvf_entry(struct igb_adapter *adapter, int vid) +static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan) { - struct e1000_hw *hw = &adapter->hw; - int i; - u32 reg; + u32 vlvf; + int idx; - /* Find the vlan filter for this id 
*/ - for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { - reg = rd32(E1000_VLVF(i)); - if ((reg & E1000_VLVF_VLANID_ENABLE) && - vid == (reg & E1000_VLVF_VLANID_MASK)) + /* short cut the special case */ + if (vlan == 0) + return 0; + + /* Search for the VLAN id in the VLVF entries */ + for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) { + vlvf = rd32(E1000_VLVF(idx)); + if ((vlvf & VLAN_VID_MASK) == vlan) break; } - if (i >= E1000_VLVF_ARRAY_SIZE) - i = -1; + return idx; +} + +void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid) +{ + struct e1000_hw *hw = &adapter->hw; + u32 bits, pf_id; + int idx; + + idx = igb_find_vlvf_entry(hw, vid); + if (!idx) + return; - return i; + /* See if any other pools are set for this VLAN filter + * entry other than the PF. + */ + pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT; + bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK; + bits &= rd32(E1000_VLVF(idx)); + + /* Disable the filter so this falls into the default pool. */ + if (!bits) { + if (adapter->flags & IGB_FLAG_VLAN_PROMISC) + wr32(E1000_VLVF(idx), 1 << pf_id); + else + wr32(E1000_VLVF(idx), 0); + } } static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, @@ -5818,7 +6003,7 @@ static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, * redundant but it guarantees PF will maintain visibility to * the VLAN. */ - if (add && (adapter->netdev->flags & IFF_PROMISC)) { + if (add && test_bit(vid, adapter->active_vlans)) { err = igb_vfta_set(hw, vid, pf_id, true, false); if (err) return err; @@ -5826,36 +6011,17 @@ static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid, err = igb_vfta_set(hw, vid, vf, add, false); - if (err) - goto out; + if (add && !err) + return err; - /* Go through all the checks to see if the VLAN filter should - * be wiped completely. + /* If we failed to add the VF VLAN or we are removing the VF VLAN + * we may need to drop the PF pool bit in order to allow us to free + * up the VLVF resources. */ - if (!add && (adapter->netdev->flags & IFF_PROMISC)) { - u32 vlvf, bits; - int regndx = igb_find_vlvf_entry(adapter, vid); - - if (regndx < 0) - goto out; - /* See if any other pools are set for this VLAN filter - * entry other than the PF. - */ - vlvf = bits = rd32(E1000_VLVF(regndx)); - bits &= 1 << (E1000_VLVF_POOLSEL_SHIFT + - adapter->vfs_allocated_count); - /* If the filter was removed then ensure PF pool bit - * is cleared if the PF only added itself to the pool - * because the PF is in promiscuous mode. 
- */ - if ((vlvf & VLAN_VID_MASK) == vid && - !test_bit(vid, adapter->active_vlans) && - !bits) - igb_vfta_set(hw, vid, adapter->vfs_allocated_count, - false, false); - } + if (test_bit(vid, adapter->active_vlans) || + (adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_update_pf_vlvf(adapter, vid); -out: return err; } @@ -7124,7 +7290,9 @@ static int igb_vlan_rx_add_vid(struct net_device *netdev, int pf_id = adapter->vfs_allocated_count; /* add the filter since PF can receive vlans w/o entry in vlvf */ - igb_vfta_set(hw, vid, pf_id, true, true); + if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, true, !!vid); + set_bit(vid, adapter->active_vlans); return 0; @@ -7138,7 +7306,8 @@ static int igb_vlan_rx_kill_vid(struct net_device *netdev, struct e1000_hw *hw = &adapter->hw; /* remove VID from filter table */ - igb_vfta_set(hw, vid, pf_id, false, true); + if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC)) + igb_vfta_set(hw, vid, pf_id, false, true); clear_bit(vid, adapter->active_vlans); -- cgit v1.2.3 From 9c2f186e45faa34d5f6ff52aa84c361d4be72288 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:11:26 -0800 Subject: igb: Drop unnecessary checks in transmit path This patch drops several checks that we dropped from ixgbe some ago. It should not be possible for us to be called with either of the conditional statements returning true so we can just drop them from the hot-path. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7366d4ff0400..bb5be404a56a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -5193,16 +5193,6 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, { struct igb_adapter *adapter = netdev_priv(netdev); - if (test_bit(__IGB_DOWN, &adapter->state)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - - if (skb->len <= 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } - /* The minimum packet size with TCTL.PSP set is 17 so pad the skb * in order to meet this minimum size requirement. */ -- cgit v1.2.3 From 268f9d33a9319bb2d4d999e264aef9c00081bba0 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:11:34 -0800 Subject: igb: Enable use of "bridge fdb add" to set unicast table entries This change makes it so that we can use the bridge utility to add a FDB entry for the PF to an igb port. By doing this we can enable the VFs to talk to virtual ports residing on top of the PF. In addition this should also address issues with MACVLANs trying to reside on top of the PF as well as they would have had similar issues when added to the PF with SR-IOV enabled. 
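For readers unfamiliar with the hook being wired up by this commit, the following is a minimal, hypothetical sketch of the general .ndo_fdb_add contract (names such as example_fdb_add and EXAMPLE_MAX_UC_FILTERS are invented for illustration; the actual igb change follows in the diff): a driver may veto an entry it cannot back with a hardware receive-address register and otherwise defers to the kernel's default software FDB handler, which ends up in dev_uc_add_excl()/dev_mc_add_excl(). From userspace this path is exercised with the bridge utility, e.g. "bridge fdb add <MAC> dev <pf-netdev>".

static int example_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 vid, u16 flags)
{
	/* Only unicast (or link-local) addresses consume a hardware
	 * receive-address (RAR) slot; multicast entries do not.
	 */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		/* EXAMPLE_MAX_UC_FILTERS is a hypothetical per-device limit */
		if (netdev_uc_count(dev) >= EXAMPLE_MAX_UC_FILTERS)
			return -ENOMEM;
	}

	/* Fall back to the generic software FDB implementation */
	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
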
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 39 ++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index bb5be404a56a..e9bdad79c9e4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2067,6 +2067,25 @@ static int igb_set_features(struct net_device *netdev, return 0; } +static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, u16 vid, + u16 flags) +{ + /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { + struct igb_adapter *adapter = netdev_priv(dev); + struct e1000_hw *hw = &adapter->hw; + int vfn = adapter->vfs_allocated_count; + int rar_entries = hw->mac.rar_entry_count - (vfn + 1); + + if (netdev_uc_count(dev) >= rar_entries) + return -ENOMEM; + } + + return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -2090,6 +2109,7 @@ static const struct net_device_ops igb_netdev_ops = { #endif .ndo_fix_features = igb_fix_features, .ndo_set_features = igb_set_features, + .ndo_fdb_add = igb_ndo_fdb_add, .ndo_features_check = passthru_features_check, }; @@ -4132,15 +4152,16 @@ static void igb_set_rx_mode(struct net_device *netdev) vmolr |= E1000_VMOLR_ROMPE; } } - /* Write addresses to available RAR registers, if there is not - * sufficient space to store all the addresses then enable - * unicast promiscuous mode - */ - count = igb_write_uc_addr_list(netdev); - if (count < 0) { - rctl |= E1000_RCTL_UPE; - vmolr |= E1000_VMOLR_ROPE; - } + } + + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = igb_write_uc_addr_list(netdev); + if (count < 0) { + rctl |= E1000_RCTL_UPE; + vmolr |= E1000_VMOLR_ROPE; } /* enable VLAN filtering by default */ -- cgit v1.2.3 From bf456abb9b82d5376e7189cca00b528dd86d1559 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 6 Jan 2016 23:11:43 -0800 Subject: igb: Add workaround for VLAN tag stripping on 82576 There was a workaround partially implemented for the 82576 that is needed in order for VLAN tag stripping to function correctly. The original code had side effects that would make it so the workaround was active on all MACs. I have updated the code so that the workaround is enabled, but limited to the 82576, or activated if we exceed the available unicast addresses. The workaround has a side effect of mirroring all of the traffic outgoing from the VFs back to the PF. As such it is not recommended to use the 82576 in promiscuous mode as it will take a performance hit, though this is now consistent with the performance as seen on the out-of-tree igb driver. I also limited the scope of the UTA bits all being set to only when the VMOLR register is enabled. This should limit the effects of the UTA register so that we don't pick up any excess traffic unless promiscuous mode has been enabled on the PF, whereas before the PF would have ended up in something equivalent to unicast promiscuous mode with VLAN filtering otherwise. 
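Because the change below is spread across several hunks, here is the end-state decision condensed into one hedged sketch (the wrapper name is hypothetical; the registers, flags and helpers such as E1000_VMOLR_ROPE, igb_write_uc_addr_list() and igb_set_uta() are the driver's own, and the surrounding register writes are only summarized in the trailing comment):

static void example_rx_mode_decision(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl = 0, vmolr = 0;

	if (netdev->flags & IFF_PROMISC) {
		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
		vmolr |= E1000_VMOLR_MPME;
		/* only the 82576 needs the UTA-based stripping workaround */
		if (hw->mac.type == e1000_82576)
			vmolr |= E1000_VMOLR_ROPE;
	}

	/* unicast addresses that no longer fit in the RAR registers also
	 * force unicast promiscuous mode and, with it, the workaround
	 */
	if (igb_write_uc_addr_list(netdev) < 0) {
		rctl |= E1000_RCTL_UPE;
		vmolr |= E1000_VMOLR_ROPE;
	}

	/* the UTA is flooded only when ROPE is actually being requested */
	igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));

	/* rctl and vmolr are then merged into E1000_RCTL and E1000_VMOLR(vfn)
	 * exactly as in the hunks below
	 */
}
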
Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 2 ++ drivers/net/ethernet/intel/igb/igb_main.c | 26 ++++++++++++++------------ 2 files changed, 16 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index fff50523b440..9a1a9c7b0748 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -425,6 +425,8 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw) /* Set mta register count */ mac->mta_reg_count = 128; + /* Set uta register count */ + mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128; /* Set rar entry count */ switch (mac->type) { case e1000_82576: diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e9bdad79c9e4..af46fcf8a50e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -140,7 +140,7 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); static int igb_change_mtu(struct net_device *, int); static int igb_set_mac(struct net_device *, void *); -static void igb_set_uta(struct igb_adapter *adapter); +static void igb_set_uta(struct igb_adapter *adapter, bool set); static irqreturn_t igb_intr(int irq, void *); static irqreturn_t igb_intr_msi(int irq, void *); static irqreturn_t igb_msix_other(int irq, void *); @@ -3670,9 +3670,6 @@ static void igb_configure_rx(struct igb_adapter *adapter) { int i; - /* set UTA to appropriate mode */ - igb_set_uta(adapter); - /* set the correct pool for the PF default MAC address in entry 0 */ igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, adapter->vfs_allocated_count); @@ -4134,7 +4131,11 @@ static void igb_set_rx_mode(struct net_device *netdev) /* Check for Promiscuous and All Multicast modes */ if (netdev->flags & IFF_PROMISC) { rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE; - vmolr |= E1000_VMOLR_ROPE | E1000_VMOLR_MPME; + vmolr |= E1000_VMOLR_MPME; + + /* enable use of UTA filter to force packets to default pool */ + if (hw->mac.type == e1000_82576) + vmolr |= E1000_VMOLR_ROPE; } else { if (netdev->flags & IFF_ALLMULTI) { rctl |= E1000_RCTL_MPE; @@ -4190,6 +4191,9 @@ static void igb_set_rx_mode(struct net_device *netdev) if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350)) return; + /* set UTA to appropriate mode */ + igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE)); + vmolr |= rd32(E1000_VMOLR(vfn)) & ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); @@ -6323,6 +6327,7 @@ static void igb_msg_task(struct igb_adapter *adapter) /** * igb_set_uta - Set unicast filter table address * @adapter: board private structure + * @set: boolean indicating if we are setting or clearing bits * * The unicast table address is a register array of 32-bit registers. * The table is meant to be used in a way similar to how the MTA is used @@ -6330,21 +6335,18 @@ static void igb_msg_task(struct igb_adapter *adapter) * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous * enable bit to allow vlan tag stripping when promiscuous mode is enabled **/ -static void igb_set_uta(struct igb_adapter *adapter) +static void igb_set_uta(struct igb_adapter *adapter, bool set) { struct e1000_hw *hw = &adapter->hw; + u32 uta = set ? 
~0 : 0; int i; - /* The UTA table only exists on 82576 hardware and newer */ - if (hw->mac.type < e1000_82576) - return; - /* we only need to do this if VMDq is enabled */ if (!adapter->vfs_allocated_count) return; - for (i = 0; i < hw->mac.uta_reg_count; i++) - array_wr32(E1000_UTA, i, ~0); + for (i = hw->mac.uta_reg_count; i--;) + array_wr32(E1000_UTA, i, uta); } /** -- cgit v1.2.3 From 3ccc30552e626934dcbaea7bfa82a4d9eb84ba6d Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 6 Feb 2016 13:09:36 -0800 Subject: net: phy: bcm7xxx: Make MII_BCM7XX_64CLK_MDIO naming consistent The driver is BCM7xxx, we were missing an additional X in the constant naming, fix that to be consistent. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- drivers/net/phy/bcm7xxx.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index bf241a3ec5e5..621518572dd7 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -24,7 +24,7 @@ #define MII_BCM7XXX_100TX_FALSE_CAR 0x13 #define MII_BCM7XXX_100TX_DISC 0x14 #define MII_BCM7XXX_AUX_MODE 0x1d -#define MII_BCM7XX_64CLK_MDIO BIT(12) +#define MII_BCM7XXX_64CLK_MDIO BIT(12) #define MII_BCM7XXX_TEST 0x1f #define MII_BCM7XXX_SHD_MODE_2 BIT(2) @@ -247,7 +247,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev) int ret; /* Enable 64 clock MDIO */ - phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); + phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XXX_64CLK_MDIO); phy_read(phydev, MII_BCM7XXX_AUX_MODE); /* Workaround only required for 100Mbits/sec capable PHYs */ -- cgit v1.2.3 From 3125c081a5937c548a6673cd7309d688c8c47b7c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 6 Feb 2016 13:09:37 -0800 Subject: net: phy: bcm7xxx: Reduce boilerplate code for 40nm EPHY Introduce a macro which helps adding new 40NM EPHY entries and reduces the amount of boilerplate code. Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/phy/bcm7xxx.c | 54 ++++++++++++++++------------------------------- 1 file changed, 18 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 621518572dd7..9b311041ebfb 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -326,6 +326,21 @@ static int bcm7xxx_dummy_config_init(struct phy_device *phydev) .resume = bcm7xxx_28nm_resume, \ } +#define BCM7XXX_40NM_EPHY(_oui, _name) \ +{ \ + .phy_id = (_oui), \ + .phy_id_mask = 0xfffffff0, \ + .name = _name, \ + .features = PHY_BASIC_FEATURES | \ + SUPPORTED_Pause | SUPPORTED_Asym_Pause, \ + .flags = PHY_IS_INTERNAL, \ + .config_init = bcm7xxx_config_init, \ + .config_aneg = genphy_config_aneg, \ + .read_status = genphy_read_status, \ + .suspend = bcm7xxx_suspend, \ + .resume = bcm7xxx_config_init, \ +} + static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"), @@ -333,43 +348,10 @@ static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"), BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"), + BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"), + BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"), + BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"), { - .phy_id = PHY_ID_BCM7425, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7425", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_config_init, -}, { - .phy_id = PHY_ID_BCM7429, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7429", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_config_init, -}, { - .phy_id = PHY_ID_BCM7435, - .phy_id_mask = 0xfffffff0, - .name = "Broadcom BCM7435", - .features = PHY_GBIT_FEATURES | - SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_IS_INTERNAL, - .config_init = bcm7xxx_config_init, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, - .suspend = bcm7xxx_suspend, - .resume = bcm7xxx_config_init, -}, { .phy_id = PHY_BCM_OUI_4, .phy_id_mask = 0xffff0000, .name = "Broadcom BCM7XXX 40nm", -- cgit v1.2.3 From 64f10f6ebf5a6d3aef106af43cf697682d022f48 Mon Sep 17 00:00:00 2001 From: Bernhard Walle Date: Mon, 8 Feb 2016 21:21:13 +0100 Subject: net: fec: Add "phy-reset-active-low" property to DT We need that for a custom hardware that needs the reverse reset sequence. Signed-off-by: Bernhard Walle Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/fsl-fec.txt | 3 +++ drivers/net/ethernet/freescale/fec_main.c | 8 ++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index a9eb611bee68..a4799fff0d1f 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -12,6 +12,9 @@ Optional properties: only if property "phy-reset-gpios" is available. 
Missing the property will have the duration be 1 millisecond. Numbers greater than 1000 are invalid and 1 millisecond will be used instead. +- phy-reset-active-low : If present then the reset sequence using the GPIO + specified in the "phy-reset-gpios" property is reversed (H=reset state, + L=operation state). - phy-supply : regulator that powers the Ethernet PHY. - phy-handle : phandle to the PHY device connected to this device. - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory. diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index ef18ca501f9e..bad0ba29a94a 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3191,6 +3191,7 @@ static int fec_enet_init(struct net_device *ndev) static void fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; + bool active_low = false; int msec = 1; struct device_node *np = pdev->dev.of_node; @@ -3206,14 +3207,17 @@ static void fec_reset_phy(struct platform_device *pdev) if (!gpio_is_valid(phy_reset)) return; + active_low = of_property_read_bool(np, "phy-reset-active-low"); + err = devm_gpio_request_one(&pdev->dev, phy_reset, - GPIOF_OUT_INIT_LOW, "phy-reset"); + active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + "phy-reset"); if (err) { dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } msleep(msec); - gpio_set_value_cansleep(phy_reset, 1); + gpio_set_value_cansleep(phy_reset, !active_low); } #else /* CONFIG_OF */ static void fec_reset_phy(struct platform_device *pdev) -- cgit v1.2.3 From 1012014ef523510321728daac8e07bd70650b793 Mon Sep 17 00:00:00 2001 From: "Keller, Jacob E" Date: Mon, 8 Feb 2016 16:05:05 -0800 Subject: fm10k: don't reinitialize RSS flow table when RXFH configured Also print an error message incase we do have to reconfigure as this should no longer happen anymore due to ethtool changes. If it somehow does occur, user should be made aware of it. Signed-off-by: Jacob Keller Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index b243c3cbe68f..38f558e0bb62 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1937,8 +1937,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices; u32 reta, base; - /* If the netdev is initialized we have to maintain table if possible */ - if (interface->netdev->reg_state != NETREG_UNINITIALIZED) { + /* If the Rx flow indirection table has been configured manually, we + * need to maintain it when possible. + */ + if (netif_is_rxfh_configured(interface->netdev)) { for (i = FM10K_RETA_SIZE; i--;) { reta = interface->reta[i]; if ((((reta << 24) >> 24) < rss_i) && @@ -1946,6 +1948,10 @@ static void fm10k_init_reta(struct fm10k_intfc *interface) (((reta << 8) >> 24) < rss_i) && (((reta) >> 24) < rss_i)) continue; + + /* this should never happen */ + dev_err(&interface->pdev->dev, + "RSS indirection table assigned flows out of queue bounds. 
Reconfiguring.\n"); goto repopulate_reta; } -- cgit v1.2.3 From 37ace20a3c99c54ebffb4b13671a01adb20926ca Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 10 Feb 2016 00:12:36 +0530 Subject: dmascc: Return correct error codes This change has been made with the goal that kernel functions should return something more descriptive than -1 on failure. A variable `err` has been introduced for storing error codes. The return value of kzalloc on failure should return a -1 and not a -ENOMEM. This was found using Coccinelle. A simplified version of the semantic patch used is: // @@ expression *e; identifier l1; @@ e = kzalloc(...); if (e == NULL) { ... goto l1; } l1: ... return -1 + -ENOMEM ; // Signed-off-by: David S. Miller --- drivers/net/hamradio/dmascc.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c index c3d377770616..e4137c1b3df9 100644 --- a/drivers/net/hamradio/dmascc.c +++ b/drivers/net/hamradio/dmascc.c @@ -451,7 +451,7 @@ static const struct net_device_ops scc_netdev_ops = { static int __init setup_adapter(int card_base, int type, int n) { - int i, irq, chip; + int i, irq, chip, err; struct scc_info *info; struct net_device *dev; struct scc_priv *priv; @@ -463,14 +463,17 @@ static int __init setup_adapter(int card_base, int type, int n) /* Initialize what is necessary for write_scc and write_scc_data */ info = kzalloc(sizeof(struct scc_info), GFP_KERNEL | GFP_DMA); - if (!info) + if (!info) { + err = -ENOMEM; goto out; + } info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup); if (!info->dev[0]) { printk(KERN_ERR "dmascc: " "could not allocate memory for %s at %#3x\n", hw[type].name, card_base); + err = -ENOMEM; goto out1; } @@ -479,6 +482,7 @@ static int __init setup_adapter(int card_base, int type, int n) printk(KERN_ERR "dmascc: " "could not allocate memory for %s at %#3x\n", hw[type].name, card_base); + err = -ENOMEM; goto out2; } spin_lock_init(&info->register_lock); @@ -549,6 +553,7 @@ static int __init setup_adapter(int card_base, int type, int n) printk(KERN_ERR "dmascc: could not find irq of %s at %#3x (irq=%d)\n", hw[type].name, card_base, irq); + err = -ENODEV; goto out3; } @@ -585,11 +590,13 @@ static int __init setup_adapter(int card_base, int type, int n) if (register_netdev(info->dev[0])) { printk(KERN_ERR "dmascc: could not register %s\n", info->dev[0]->name); + err = -ENODEV; goto out3; } if (register_netdev(info->dev[1])) { printk(KERN_ERR "dmascc: could not register %s\n", info->dev[1]->name); + err = -ENODEV; goto out4; } @@ -612,7 +619,7 @@ static int __init setup_adapter(int card_base, int type, int n) out1: kfree(info); out: - return -1; + return err; } -- cgit v1.2.3 From 906807793092f7d659a8933ddd7b9fa97cd6cf39 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:41 -0800 Subject: i40e: AQ Shared resource flags Add flags to MAC allocation requests to signify that the MAC VLAN filters should come from the shared resource pool rather than the dedicated PF resource pools. 
Change-ID: I4c2da64c01856edcb0982bc4aab75c5a91047a7a Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 5 +++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 5 +++++ 2 files changed, 10 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 0e608d2a70d5..6e0834eb8eeb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -976,6 +976,7 @@ struct i40e_aqc_add_macvlan_element_data { #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ @@ -1265,6 +1266,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 + __le32 tenant_id; u8 reserved[4]; __le16 queue_number; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 578b1780fb08..220b972ace4c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -973,6 +973,7 @@ struct i40e_aqc_add_macvlan_element_data { #define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 #define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 #define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 +#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 __le16 queue_number; #define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 #define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ @@ -1262,6 +1263,10 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 +#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 + __le32 tenant_id; u8 reserved[4]; __le16 queue_number; -- cgit v1.2.3 From fa5623a6e6807baa7f3b8e65896199e23e3f939e Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:42 -0800 Subject: i40e: AQ Add set_switch_config Add the new Set Switch Config AdminQ command, and mark the L2 Filter bit as deprecated in the Add VEB command. 
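Only the opcode and the command structure are introduced here, so as context, a hedged sketch of how the command could be issued over the AdminQ; the wrapper itself is not part of this patch, while i40e_fill_default_direct_cmd_desc() and i40e_asq_send_command() are existing driver helpers.

/* Illustrative only: valid_flags tells the firmware which bits of
 * 'flags' to honour; unset valid bits are left unchanged.
 */
static i40e_status example_set_switch_config(struct i40e_hw *hw, u16 flags,
					     u16 valid_flags)
{
	struct i40e_aq_desc desc;
	struct i40e_aqc_set_switch_config *scfg =
		(struct i40e_aqc_set_switch_config *)&desc.params.raw;

	i40e_fill_default_direct_cmd_desc(&desc,
					  i40e_aqc_opc_set_switch_config);
	scfg->flags = cpu_to_le16(flags);
	scfg->valid_flags = cpu_to_le16(valid_flags);

	return i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
}

/* e.g. example_set_switch_config(hw, I40E_AQ_SET_SWITCH_CFG_PROMISC,
 *				   I40E_AQ_SET_SWITCH_CFG_PROMISC);
 */
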
Change-ID: I5b24790f14c56f0ddf3f70df1e486844146b039f Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 15 ++++++++++++++- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 15 ++++++++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 6e0834eb8eeb..7e9661592c52 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -145,6 +145,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -683,6 +684,17 @@ struct i40e_aqc_switch_resource_alloc_element_resp { I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) @@ -909,7 +921,8 @@ struct i40e_aqc_add_veb { I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 220b972ace4c..2c8f3ed15822 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -145,6 +145,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_remove_statistics = 0x0202, i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -680,6 +681,17 @@ struct i40e_aqc_switch_resource_alloc_element_resp { I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); +/* Set Switch Configuration (direct 0x0205) */ +struct i40e_aqc_set_switch_config { + __le16 flags; +#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 +#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 + __le16 valid_flags; + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) @@ -906,7 +918,8 @@ struct i40e_aqc_add_veb { I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) #define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 #define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 +#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ +#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 u8 enable_tcs; u8 reserved[9]; }; -- cgit v1.2.3 From 445204644ba6e5565d7aba85a4323b52960179db Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 16 Feb 2016 18:07:57 +0200 
Subject: bnx2x: Remove unneccessary EXPORT_SYMBOL bnx2x_schedule_sp_rtnl is exported by bnx2x, although no other module uses it. Reported-by: Benjamin Poirier Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 9695a4c4a434..9e42bcaf9917 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -5086,4 +5086,3 @@ void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag, flag); schedule_delayed_work(&bp->sp_rtnl_task, 0); } -EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl); -- cgit v1.2.3 From 883ce97d25b019ce8437ba6f49e38302ca5ec23f Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 16 Feb 2016 18:07:58 +0200 Subject: bnx2x: Add Geneve inner-RSS support This adds the ability to perform RSS hashing based on encapsulated headers for a geneve-encapsulated packet. This also changes the Vxlan implementation in bnx2x to be uniform for both vxlan and geneve [from configuration perspective]. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 19 ++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 11 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 175 ++++++++++++++++------- 3 files changed, 146 insertions(+), 59 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index cae0956186ce..7dd7490fdac1 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1277,8 +1277,7 @@ enum sp_rtnl_flag { BNX2X_SP_RTNL_HYPERVISOR_VLAN, BNX2X_SP_RTNL_TX_STOP, BNX2X_SP_RTNL_GET_DRV_VERSION, - BNX2X_SP_RTNL_ADD_VXLAN_PORT, - BNX2X_SP_RTNL_DEL_VXLAN_PORT, + BNX2X_SP_RTNL_CHANGE_UDP_PORT, }; enum bnx2x_iov_flag { @@ -1327,6 +1326,17 @@ struct bnx2x_vlan_entry { bool hw; }; +enum bnx2x_udp_port_type { + BNX2X_UDP_PORT_VXLAN, + BNX2X_UDP_PORT_GENEVE, + BNX2X_UDP_PORT_MAX, +}; + +struct bnx2x_udp_tunnel { + u16 dst_port; + u8 count; +}; + struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure @@ -1830,9 +1840,10 @@ struct bnx2x { struct list_head vlan_reg; u16 vlan_cnt; u16 vlan_credit; - u16 vxlan_dst_port; - u8 vxlan_dst_port_count; bool accept_any_vlan; + + /* Vxlan/Geneve related information */ + struct bnx2x_udp_tunnel udp_tunnel_ports[BNX2X_UDP_PORT_MAX]; }; /* Tx queues may be less or equal to Rx queues */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 4cbb03f87b5a..37369865ca6d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -923,6 +923,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp) struct bnx2x_func_state_params func_params = {NULL}; struct bnx2x_func_start_params *start_params = &func_params.params.start; + u16 port; /* Prepare parameters for function state transitions */ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags); @@ -959,8 +960,14 @@ static inline int bnx2x_func_start(struct bnx2x *bp) start_params->network_cos_mode = STATIC_COS; else /* CHIP_IS_E1X */ start_params->network_cos_mode = FW_WRR; - - start_params->vxlan_dst_port = 
bp->vxlan_dst_port; + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].dst_port; + start_params->vxlan_dst_port = port; + } + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { + port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].dst_port; + start_params->geneve_dst_port = port; + } start_params->inner_rss = 1; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 6c4e3a69976f..5d6b2d95eb9d 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -59,7 +59,9 @@ #include #include #include - +#if IS_ENABLED(CONFIG_GENEVE) +#include +#endif #include "bnx2x.h" #include "bnx2x_init.h" #include "bnx2x_init_ops.h" @@ -10076,11 +10078,13 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } -#ifdef CONFIG_BNX2X_VXLAN -static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_GENEVE) +static int bnx2x_udp_port_update(struct bnx2x *bp) { struct bnx2x_func_switch_update_params *switch_update_params; struct bnx2x_func_state_params func_params = {NULL}; + struct bnx2x_udp_tunnel *udp_tunnel; + u16 vxlan_port = 0, geneve_port = 0; int rc; switch_update_params = &func_params.params.switch_update; @@ -10095,69 +10099,125 @@ static int bnx2x_vxlan_port_update(struct bnx2x *bp, u16 port) /* Function parameters */ __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG, &switch_update_params->changes); - switch_update_params->vxlan_dst_port = port; + + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) { + udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]; + geneve_port = udp_tunnel->dst_port; + switch_update_params->geneve_dst_port = geneve_port; + } + + if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) { + udp_tunnel = &bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]; + vxlan_port = udp_tunnel->dst_port; + switch_update_params->vxlan_dst_port = vxlan_port; + } + + /* Re-enable inner-rss for the offloaded UDP tunnels */ + __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS, + &switch_update_params->changes); + rc = bnx2x_func_state_change(bp, &func_params); if (rc) - BNX2X_ERR("failed to change vxlan dst port to %d (rc = 0x%x)\n", - port, rc); + BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n", + vxlan_port, geneve_port, rc); + else + DP(BNX2X_MSG_SP, + "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n", + vxlan_port, geneve_port); + return rc; } -static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port) +static void __bnx2x_add_udp_port(struct bnx2x *bp, u16 port, + enum bnx2x_udp_port_type type) { - if (!netif_running(bp->dev)) + struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; + + if (!netif_running(bp->dev) || !IS_PF(bp)) + return; + + if (udp_port->count && udp_port->dst_port == port) { + udp_port->count++; return; + } - if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) { - bp->vxlan_dst_port_count++; + if (udp_port->count) { + DP(BNX2X_MSG_SP, + "UDP tunnel [%d] - destination port limit reached\n", + type); return; } - if (bp->vxlan_dst_port_count || !IS_PF(bp)) { - DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n"); + udp_port->dst_port = port; + udp_port->count = 1; + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); +} + +static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port, + enum bnx2x_udp_port_type type) +{ + struct bnx2x_udp_tunnel *udp_port = &bp->udp_tunnel_ports[type]; + + if 
(!IS_PF(bp)) + return; + + if (!udp_port->count || udp_port->dst_port != port) { + DP(BNX2X_MSG_SP, "Invalid UDP tunnel [%d] port\n", + type); return; } - bp->vxlan_dst_port = port; - bp->vxlan_dst_port_count = 1; - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0); + /* Remove reference, and make certain it's no longer in use */ + udp_port->count--; + if (udp_port->count) + return; + udp_port->dst_port = 0; + + if (netif_running(bp->dev)) + bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_CHANGE_UDP_PORT, 0); + else + DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n", + type, port); } +#endif +#ifdef CONFIG_BNX2X_VXLAN static void bnx2x_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { struct bnx2x *bp = netdev_priv(netdev); u16 t_port = ntohs(port); - __bnx2x_add_vxlan_port(bp, t_port); + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); } -static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port) +static void bnx2x_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { - if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port || - !IS_PF(bp)) { - DP(BNX2X_MSG_SP, "Invalid vxlan port\n"); - return; - } - bp->vxlan_dst_port_count--; - if (bp->vxlan_dst_port_count) - return; + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); - if (netif_running(bp->dev)) { - bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0); - } else { - bp->vxlan_dst_port = 0; - netdev_info(bp->dev, "Deleted vxlan dest port %d", port); - } + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN); +} +#endif + +#if IS_ENABLED(CONFIG_GENEVE) +static void bnx2x_add_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct bnx2x *bp = netdev_priv(netdev); + u16 t_port = ntohs(port); + + __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); } -static void bnx2x_del_vxlan_port(struct net_device *netdev, - sa_family_t sa_family, __be16 port) +static void bnx2x_del_geneve_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) { struct bnx2x *bp = netdev_priv(netdev); u16 t_port = ntohs(port); - __bnx2x_del_vxlan_port(bp, t_port); + __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE); } #endif @@ -10169,9 +10229,6 @@ static int bnx2x_close(struct net_device *dev); static void bnx2x_sp_rtnl_task(struct work_struct *work) { struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work); -#ifdef CONFIG_BNX2X_VXLAN - u16 port; -#endif rtnl_lock(); @@ -10270,23 +10327,27 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); -#ifdef CONFIG_BNX2X_VXLAN - port = bp->vxlan_dst_port; - if (test_and_clear_bit(BNX2X_SP_RTNL_ADD_VXLAN_PORT, - &bp->sp_rtnl_state)) { - if (!bnx2x_vxlan_port_update(bp, port)) - netdev_info(bp->dev, "Added vxlan dest port %d", port); - else - bp->vxlan_dst_port = 0; - } - - if (test_and_clear_bit(BNX2X_SP_RTNL_DEL_VXLAN_PORT, +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_GENEVE) + if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { - if (!bnx2x_vxlan_port_update(bp, 0)) { - netdev_info(bp->dev, - "Deleted vxlan dest port %d", port); - bp->vxlan_dst_port = 0; - vxlan_get_rx_port(bp->dev); + if (bnx2x_udp_port_update(bp)) { + /* On error, forget configuration */ + memset(bp->udp_tunnel_ports, 0, + sizeof(struct bnx2x_udp_tunnel) * + BNX2X_UDP_PORT_MAX); + } else { + /* Since we don't store additional port information, + * if no port is configured for any feature ask for + * information about currently 
configured ports. + */ +#ifdef CONFIG_BNX2X_VXLAN + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) + vxlan_get_rx_port(bp->dev); +#endif +#if IS_ENABLED(CONFIG_GENEVE) + if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) + geneve_get_rx_port(bp->dev); +#endif } } #endif @@ -12494,6 +12555,10 @@ static int bnx2x_open(struct net_device *dev) if (IS_PF(bp)) vxlan_get_rx_port(dev); #endif +#if IS_ENABLED(CONFIG_GENEVE) + if (IS_PF(bp)) + geneve_get_rx_port(dev); +#endif return 0; } @@ -13011,6 +13076,10 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_add_vxlan_port = bnx2x_add_vxlan_port, .ndo_del_vxlan_port = bnx2x_del_vxlan_port, #endif +#if IS_ENABLED(CONFIG_GENEVE) + .ndo_add_geneve_port = bnx2x_add_geneve_port, + .ndo_del_geneve_port = bnx2x_del_geneve_port, +#endif }; static int bnx2x_set_coherency_mask(struct bnx2x *bp) -- cgit v1.2.3 From 9c73267d2ea26b0148932f335a7deb5480a80425 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 16 Feb 2016 18:07:59 +0200 Subject: bnx2x: Add support for single-port DCBx Driver is currently looking at shared information for determining whether DCBx can be supported for a given port. On 4-port devices, up-to-date management firmware can support DCBx on each port of a given engine independently - but that would cause bnx2x to misinterpert the support and assume DCBx is supported on both. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 5d6b2d95eb9d..c5845252c920 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12429,8 +12429,10 @@ static int bnx2x_init_bp(struct bnx2x *bp) if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) && SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_HAS(bp, dcbx_en) && SHMEM2_RD(bp, dcbx_lldp_params_offset) && - SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) { + SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) && + SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) { bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON); bnx2x_dcbx_init_params(bp); } else { -- cgit v1.2.3 From e5d3a51cefbb08c8b8c521f627aef1f998834530 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 16 Feb 2016 18:08:00 +0200 Subject: bnx2x: extend DCBx support This adds support for default application priority. Signed-off-by: Yuval Mintz Signed-off-by: Ariel Elior Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 57 +++++++++++++++++++------ drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 5 ++- 2 files changed, 47 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 7ccf6684e0a3..2c6ba046d2a8 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -195,6 +195,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, u32 error) { u8 index; u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority; + u8 iscsi_pri_found = 0, fcoe_pri_found = 0; if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n"); @@ -210,29 +211,57 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp, bp->dcbx_port_params.app.enabled = true; + /* Use 0 as the default application priority for all. */ for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++) ttp[index] = 0; - if (app->default_pri < MAX_PFC_PRIORITIES) - ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri; - for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) { struct dcbx_app_priority_entry *entry = app->app_pri_tbl; + enum traffic_type type = MAX_TRAFFIC_TYPE; if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_ETH_TYPE) && - ETH_TYPE_FCOE == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_FCOE); + DCBX_APP_SF_DEFAULT) && + GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE)) { + type = LLFC_TRAFFIC_TYPE_NW; + } else if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_PORT) && + TCP_PORT_ISCSI == entry[index].app_id) { + type = LLFC_TRAFFIC_TYPE_ISCSI; + iscsi_pri_found = 1; + } else if (GET_FLAGS(entry[index].appBitfield, + DCBX_APP_SF_ETH_TYPE) && + ETH_TYPE_FCOE == entry[index].app_id) { + type = LLFC_TRAFFIC_TYPE_FCOE; + fcoe_pri_found = 1; + } - if (GET_FLAGS(entry[index].appBitfield, - DCBX_APP_SF_PORT) && - TCP_PORT_ISCSI == entry[index].app_id) - bnx2x_dcbx_get_ap_priority(bp, - entry[index].pri_bitmap, - LLFC_TRAFFIC_TYPE_ISCSI); + if (type == MAX_TRAFFIC_TYPE) + continue; + + bnx2x_dcbx_get_ap_priority(bp, + entry[index].pri_bitmap, + type); + } + + /* If we have received a non-zero default application + * priority, then use that for applications which are + * not configured with any priority. 
+ */ + if (ttp[LLFC_TRAFFIC_TYPE_NW] != 0) { + if (!iscsi_pri_found) { + ttp[LLFC_TRAFFIC_TYPE_ISCSI] = + ttp[LLFC_TRAFFIC_TYPE_NW]; + DP(BNX2X_MSG_DCB, + "ISCSI is using default priority.\n"); + } + if (!fcoe_pri_found) { + ttp[LLFC_TRAFFIC_TYPE_FCOE] = + ttp[LLFC_TRAFFIC_TYPE_NW]; + DP(BNX2X_MSG_DCB, + "FCoE is using default priority.\n"); + } } } else { DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 27aa0802d87d..dd9d6e6100a5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1831,10 +1831,13 @@ struct dcbx_app_priority_entry { #elif defined(__LITTLE_ENDIAN) u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 #define DCBX_APP_ENTRY_SF_SHIFT 4 + #define DCBX_APP_ENTRY_VALID 0x01 #define DCBX_APP_SF_ETH_TYPE 0x10 #define DCBX_APP_SF_PORT 0x20 + #define DCBX_APP_SF_UDP 0x40 + #define DCBX_APP_SF_DEFAULT 0x80 u8 pri_bitmap; u16 app_id; #endif -- cgit v1.2.3 From e56270f63529718bed5b9017c98fb355a25e9954 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 16 Feb 2016 18:08:01 +0200 Subject: bnx2x: Warn about grc timeouts in register dump There are several scenarios where taking a register dump from a device might log benign GRC timeout attentions to system logs. Most common of those is when taking the dump from a 2-port device. Sadly, there's no easy way to mask the problematic attentions during the flow - Changing this behvaior would require a firmware update. For now, simply warn users to ignore the warnings. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 820b7e04bb5f..85a7800bfc12 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -981,6 +981,11 @@ static void bnx2x_get_regs(struct net_device *dev, memcpy(p, &dump_hdr, sizeof(struct dump_header)); p += dump_hdr.header_size + 1; + /* This isn't really an error, but since attention handling is going + * to print the GRC timeouts using this macro, we use the same. + */ + BNX2X_ERR("Generating register dump. Might trigger harmless GRC timeouts\n"); + /* Actually read the registers */ __bnx2x_get_regs(bp, p); -- cgit v1.2.3 From 0c1d70af924b966cc71e9e48920b2b635441aa50 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 12 Feb 2016 15:43:56 +0100 Subject: net: use dst_cache for vxlan device In case of UDP traffic with datagram length below MTU this give about 3% performance increase when tunneling over ipv4 and about 70% when tunneling over ipv6. Signed-off-by: Paolo Abeni Suggested-and-acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 55 +++++++++++++++++++++++++++++++++++++++++++++-------- include/net/vxlan.h | 1 + 2 files changed, 48 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0a23c64379d6..ad673037bd73 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -480,6 +480,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list); if (!rd) return 0; + + dst_cache_reset(&rd->dst_cache); rd->remote_ip = *ip; rd->remote_port = port; rd->remote_vni = vni; @@ -501,6 +503,12 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (rd == NULL) return -ENOBUFS; + + if (dst_cache_init(&rd->dst_cache, GFP_ATOMIC)) { + kfree(rd); + return -ENOBUFS; + } + rd->remote_ip = *ip; rd->remote_port = port; rd->remote_vni = vni; @@ -749,8 +757,10 @@ static void vxlan_fdb_free(struct rcu_head *head) struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu); struct vxlan_rdst *rd, *nd; - list_for_each_entry_safe(rd, nd, &f->remotes, list) + list_for_each_entry_safe(rd, nd, &f->remotes, list) { + dst_cache_destroy(&rd->dst_cache); kfree(rd); + } kfree(f); } @@ -1754,11 +1764,24 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, u8 tos, - __be32 daddr, __be32 *saddr) + __be32 daddr, __be32 *saddr, + struct dst_cache *dst_cache, + struct ip_tunnel_info *info) { struct rtable *rt = NULL; + bool use_cache = false; struct flowi4 fl4; + /* when the ip_tunnel_info is availble, the tos used for lookup is + * packet independent, so we can use the cache + */ + if (dst_cache && !skb->mark && (!tos || info)) { + use_cache = true; + rt = dst_cache_get_ip4(dst_cache, saddr); + if (rt) + return rt; + } + memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_oif = oif; fl4.flowi4_tos = RT_TOS(tos); @@ -1768,8 +1791,11 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr; rt = ip_route_output_key(vxlan->net, &fl4); - if (!IS_ERR(rt)) + if (!IS_ERR(rt)) { *saddr = fl4.saddr; + if (use_cache) + dst_cache_set_ip4(dst_cache, &rt->dst, fl4.saddr); + } return rt; } @@ -1777,12 +1803,21 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, const struct in6_addr *daddr, - struct in6_addr *saddr) + struct in6_addr *saddr, + struct dst_cache *dst_cache) { + bool use_cache = false; struct dst_entry *ndst; struct flowi6 fl6; int err; + if (dst_cache && !skb->mark) { + use_cache = true; + ndst = dst_cache_get_ip6(dst_cache, saddr); + if (ndst) + return ndst; + } + memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; fl6.daddr = *daddr; @@ -1797,6 +1832,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, return ERR_PTR(err); *saddr = fl6.saddr; + if (use_cache) + dst_cache_set_ip6(dst_cache, ndst, saddr); return ndst; } #endif @@ -1938,7 +1975,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, - dst->sin.sin_addr.s_addr, &saddr); + dst->sin.sin_addr.s_addr, &saddr, + rdst ? 
&rdst->dst_cache : NULL, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &dst->sin.sin_addr.s_addr); @@ -1990,7 +2028,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, - &dst->sin6.sin6_addr, &saddr); + &dst->sin6.sin6_addr, &saddr, + rdst ? &rdst->dst_cache : NULL); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); @@ -2331,7 +2370,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; rt = vxlan_get_route(vxlan, skb, 0, info->key.tos, info->key.u.ipv4.dst, - &info->key.u.ipv4.src); + &info->key.u.ipv4.src, NULL, info); if (IS_ERR(rt)) return PTR_ERR(rt); ip_rt_put(rt); @@ -2343,7 +2382,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; ndst = vxlan6_get_route(vxlan, skb, 0, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src); + &info->key.u.ipv6.src, NULL); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 25bd919c9ef0..b314e4af89c5 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -148,6 +148,7 @@ struct vxlan_rdst { u32 remote_ifindex; struct list_head list; struct rcu_head rcu; + struct dst_cache dst_cache; }; struct vxlan_config { -- cgit v1.2.3 From d71785ffc7e7cae3fbdc4ea8a9d05b7a1c59f7b8 Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 12 Feb 2016 15:43:57 +0100 Subject: net: add dst_cache to ovs vxlan lwtunnel In case of UDP traffic with datagram length below MTU this give about 2% performance increase when tunneling over ipv4 and about 60% when tunneling over ipv6 Signed-off-by: Paolo Abeni Suggested-and-acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 15 ++++++++------- include/net/dst_metadata.h | 1 + include/net/ip_tunnels.h | 3 +++ net/core/dst.c | 10 +++++++++- net/openvswitch/Kconfig | 1 + net/openvswitch/flow_netlink.c | 6 ++++++ 6 files changed, 28 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ad673037bd73..ee1206d9f8df 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1775,7 +1775,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, /* when the ip_tunnel_info is availble, the tos used for lookup is * packet independent, so we can use the cache */ - if (dst_cache && !skb->mark && (!tos || info)) { + if (!skb->mark && (!tos || info)) { use_cache = true; rt = dst_cache_get_ip4(dst_cache, saddr); if (rt) @@ -1806,13 +1806,11 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct in6_addr *saddr, struct dst_cache *dst_cache) { - bool use_cache = false; struct dst_entry *ndst; struct flowi6 fl6; int err; - if (dst_cache && !skb->mark) { - use_cache = true; + if (!skb->mark) { ndst = dst_cache_get_ip6(dst_cache, saddr); if (ndst) return ndst; @@ -1832,7 +1830,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, return ERR_PTR(err); *saddr = fl6.saddr; - if (use_cache) + if (!skb->mark) dst_cache_set_ip6(dst_cache, ndst, saddr); return ndst; } @@ -1886,6 +1884,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_rdst *rdst, bool did_rsc) { + struct dst_cache *dst_cache; struct ip_tunnel_info *info; struct vxlan_dev *vxlan = netdev_priv(dev); struct sock *sk; @@ -1910,6 +1909,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, dst_port = rdst->remote_port ? rdst->remote_port : vxlan->cfg.dst_port; vni = rdst->remote_vni; dst = &rdst->remote_ip; + dst_cache = &rdst->dst_cache; } else { if (!info) { WARN_ONCE(1, "%s: Missing encapsulation instructions\n", @@ -1924,6 +1924,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, else remote_ip.sin6.sin6_addr = info->key.u.ipv6.dst; dst = &remote_ip; + dst_cache = &info->dst_cache; } if (vxlan_addr_any(dst)) { @@ -1976,7 +1977,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, dst->sin.sin_addr.s_addr, &saddr, - rdst ? &rdst->dst_cache : NULL, info); + dst_cache, info); if (IS_ERR(rt)) { netdev_dbg(dev, "no route to %pI4\n", &dst->sin.sin_addr.s_addr); @@ -2029,7 +2030,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, &dst->sin6.sin6_addr, &saddr, - rdst ? 
&rdst->dst_cache : NULL); + dst_cache); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 30a56ab2ccfb..84b833af6882 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -62,6 +62,7 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a, sizeof(a->u.tun_info) + a->u.tun_info.options_len); } +void metadata_dst_free(struct metadata_dst *); struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags); struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index fd36936d85a6..87408ab80856 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -58,6 +58,9 @@ struct ip_tunnel_key { struct ip_tunnel_info { struct ip_tunnel_key key; +#ifdef CONFIG_DST_CACHE + struct dst_cache dst_cache; +#endif u8 options_len; u8 mode; }; diff --git a/net/core/dst.c b/net/core/dst.c index a1656e3b8d72..b5cbbe07f786 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -265,7 +265,7 @@ again: lwtstate_put(dst->lwtstate); if (dst->flags & DST_METADATA) - kfree(dst); + metadata_dst_free((struct metadata_dst *)dst); else kmem_cache_free(dst->ops->kmem_cachep, dst); @@ -395,6 +395,14 @@ struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags) } EXPORT_SYMBOL_GPL(metadata_dst_alloc); +void metadata_dst_free(struct metadata_dst *md_dst) +{ +#ifdef CONFIG_DST_CACHE + dst_cache_destroy(&md_dst->u.tun_info.dst_cache); +#endif + kfree(md_dst); +} + struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags) { int cpu; diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index d143aa9f6654..cd5fd9d728a7 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -10,6 +10,7 @@ config OPENVSWITCH select LIBCRC32C select MPLS select NET_MPLS_GSO + select DST_CACHE ---help--- Open vSwitch is a multilayer Ethernet switch targeted at virtualized environments. In addition to supporting a variety of features diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index d1bd4a45ca2d..58b8efc23668 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -1959,6 +1959,12 @@ static int validate_and_copy_set_tun(const struct nlattr *attr, if (!tun_dst) return -ENOMEM; + err = dst_cache_init(&tun_dst->u.tun_info.dst_cache, GFP_KERNEL); + if (err) { + dst_release((struct dst_entry *)tun_dst); + return err; + } + a = __add_action(sfa, OVS_KEY_ATTR_TUNNEL_INFO, NULL, sizeof(*ovs_tun), log); if (IS_ERR(a)) { -- cgit v1.2.3 From 468dfffcd762cbb2777ec5a76bc21e3748ebf47e Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 12 Feb 2016 15:43:58 +0100 Subject: geneve: add dst caching support use generic dst implementation for both plain geneve devices and lwtunnels. In case of UDP traffic with datagram length below MTU this give about 2% performance increase for plain geneve tunnel over ipv4, about 65% performance increase for ipv6 tunnel. Signed-off-by: Paolo Abeni Suggested-and-Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 0b14ac3b8d11..6f208132a574 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -72,6 +72,7 @@ struct geneve_dev { bool collect_md; struct gro_cells gro_cells; u32 flags; + struct dst_cache dst_cache; }; /* Geneve device flags */ @@ -297,6 +298,13 @@ static int geneve_init(struct net_device *dev) return err; } + err = dst_cache_init(&geneve->dst_cache, GFP_KERNEL); + if (err) { + free_percpu(dev->tstats); + gro_cells_destroy(&geneve->gro_cells); + return err; + } + return 0; } @@ -304,6 +312,7 @@ static void geneve_uninit(struct net_device *dev) { struct geneve_dev *geneve = netdev_priv(dev); + dst_cache_destroy(&geneve->dst_cache); gro_cells_destroy(&geneve->gro_cells); free_percpu(dev->tstats); } @@ -753,7 +762,9 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct ip_tunnel_info *info) { struct geneve_dev *geneve = netdev_priv(dev); + struct dst_cache *dst_cache; struct rtable *rt = NULL; + bool use_cache = true; __u8 tos; memset(fl4, 0, sizeof(*fl4)); @@ -764,16 +775,26 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; fl4->flowi4_tos = RT_TOS(info->key.tos); + dst_cache = &info->dst_cache; } else { tos = geneve->tos; if (tos == 1) { const struct iphdr *iip = ip_hdr(skb); tos = ip_tunnel_get_dsfield(iip, skb); + use_cache = false; } fl4->flowi4_tos = RT_TOS(tos); fl4->daddr = geneve->remote.sin.sin_addr.s_addr; + dst_cache = &geneve->dst_cache; + } + + use_cache = use_cache && !skb->mark; + if (use_cache) { + rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); + if (rt) + return rt; } rt = ip_route_output_key(geneve->net, fl4); @@ -786,6 +807,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, ip_rt_put(rt); return ERR_PTR(-ELOOP); } + if (use_cache) + dst_cache_set_ip4(dst_cache, &rt->dst, fl4->saddr); return rt; } @@ -798,6 +821,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; + struct dst_cache *dst_cache; + bool use_cache = true; __u8 prio; memset(fl6, 0, sizeof(*fl6)); @@ -808,16 +833,26 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; fl6->flowi6_tos = RT_TOS(info->key.tos); + dst_cache = &info->dst_cache; } else { prio = geneve->tos; if (prio == 1) { const struct iphdr *iip = ip_hdr(skb); prio = ip_tunnel_get_dsfield(iip, skb); + use_cache = false; } fl6->flowi6_tos = RT_TOS(prio); fl6->daddr = geneve->remote.sin6.sin6_addr; + dst_cache = &geneve->dst_cache; + } + + use_cache = use_cache && !skb->mark; + if (use_cache) { + dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); + if (dst) + return dst; } if (ipv6_stub->ipv6_dst_lookup(geneve->net, gs6->sock->sk, &dst, fl6)) { @@ -830,6 +865,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, return ERR_PTR(-ELOOP); } + if (use_cache) + dst_cache_set_ip6(dst_cache, dst, &fl6->saddr); return dst; } #endif @@ -1272,6 +1309,8 @@ static int geneve_configure(struct net *net, struct net_device *dev, return -EPERM; } + dst_cache_reset(&geneve->dst_cache); + err = register_netdevice(dev); if (err) return err; -- cgit v1.2.3 From 2fcc1a401eb06ff41cd551672ea588c6efd8c4e6 Mon Sep 17 00:00:00 2001 
From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:43 -0800 Subject: i40e: AQ Add VXLAN-GPE tunnel type Add the new Cisco VXLAN-GPE cloud tunnel type for the Add Cloud Filter and UDP tunnel AQ commands. Change-ID: I2c093c7d79726c7fca08a36e5c63581a905da3d2 Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 3 +++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 3 +++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 7e9661592c52..5effb912ad35 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1278,6 +1278,8 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 @@ -2229,6 +2231,7 @@ struct i40e_aqc_add_udp_tunnel { #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 2c8f3ed15822..bdb18f68aad7 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1275,6 +1275,8 @@ struct i40e_aqc_add_remove_cloud_filters_element_data { #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 +#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 #define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 @@ -2121,6 +2123,7 @@ struct i40e_aqc_add_udp_tunnel { #define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 #define I40E_AQC_TUNNEL_TYPE_NGE 0x01 #define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 +#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 u8 reserved1[10]; }; -- cgit v1.2.3 From 27090cbdc345cd5fd6f6e57a8986c504c27e4d23 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 12 Feb 2016 11:42:34 +0100 Subject: net: phy: spi_ks8995: include linux/gpio/consumer.h The ks8995 phy driver just started using gpiod_* functions, which are declared in linux/gpio/consumer.h, not linux/gpio.h, resulting in a build failure in randconfig builds that do not have CONFIG_GPIOLIB enabled: drivers/net/phy/spi_ks8995.c: In function 'ks8995_probe': drivers/net/phy/spi_ks8995.c:477:3: error: implicit declaration of function 'gpiod_set_value' [-Werror=implicit-function-declaration] This changes the header inclusion so it builds in all configurations. Signed-off-by: Arnd Bergmann Fixes: cd6f288cbaab ("net: phy: spi_ks8995: add support for resetting switch using GPIO") Signed-off-by: David S. 
Miller --- drivers/net/phy/spi_ks8995.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c index 5e7340f6b37c..b5d50d458728 100644 --- a/drivers/net/phy/spi_ks8995.c +++ b/drivers/net/phy/spi_ks8995.c @@ -18,8 +18,8 @@ #include #include #include +#include #include -#include #include #include -- cgit v1.2.3 From 6774faf96437e8192e4ee2d16f1399ec9f842b80 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:44 -0800 Subject: i40e: AQ thermal sensor control struct Add the new AQ command and struct for managing a thermal sensor. Change-ID: I6f5631839a0f3dca352a6c222f1269a960e2310a Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 17 +++++++++++++++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 17 +++++++++++++++++ 2 files changed, 34 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 5effb912ad35..9684572d0f1a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -230,6 +230,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1952,6 +1953,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer { I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id is only used by PF * Send to Peer PF command (indirect 0x0803) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index bdb18f68aad7..64685378ba99 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -230,6 +230,7 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_nvm_config_read = 0x0704, i40e_aqc_opc_nvm_config_write = 0x0705, i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, /* virtualization commands */ i40e_aqc_opc_send_msg_to_pf = 0x0801, @@ -1949,6 +1950,22 @@ struct i40e_aqc_nvm_oem_post_update_buffer { I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); +/* Thermal Sensor (indirect 0x0721) + * read or set thermal sensor configs and values + * takes a sensor and command specific data buffer, not detailed here + */ +struct i40e_aqc_thermal_sensor { + u8 sensor_action; +#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 +#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 +#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 + u8 reserved[7]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); + /* Send to PF command (indirect 0x0801) id is only used by PF * Send to VF command (indirect 0x0802) id 
is only used by PF * Send to Peer PF command (indirect 0x0803) -- cgit v1.2.3 From 97b884fecd6186ac1bcc932c453dce305d81040b Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:45 -0800 Subject: i40e: Bump AQ minor version to 1.5 for new FW features Bump AQ minor version to 1.5 for new FW features. Change-ID: I5a790f7f519a2a8921aaa1c5663727dd1897ffec Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2 +- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 9684572d0f1a..56992997a71c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0005 struct i40e_aq_desc { __le16 flags; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 64685378ba99..ed963bc09a82 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -34,7 +34,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR 0x0004 +#define I40E_FW_API_VERSION_MINOR 0x0005 struct i40e_aq_desc { __le16 flags; -- cgit v1.2.3 From 071c859b878a05c3f603669bfda2e5b4332dd695 Mon Sep 17 00:00:00 2001 From: Pandi Maharajan Date: Wed, 23 Dec 2015 12:05:46 -0800 Subject: i40e: Store lan_vsi_idx and lan_vsi_id in the right size lan_vsi_idx and lan_vsi_id are assigned to u16 data sized variables but declared in u8. This patch fixes the width of the datatype. Change-ID: If4bcbcc7d32f2b287c51cb33d17879691258dce2 Signed-off-by: Pandi Kumar Maharajan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index da44995def42..e74642a0c42e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -91,8 +91,8 @@ struct i40e_vf { * When assigned, these will be non-zero, because VSI 0 is always * the main LAN VSI for the PF. */ - u8 lan_vsi_idx; /* index into PF struct */ - u8 lan_vsi_id; /* ID as used by firmware */ + u16 lan_vsi_idx; /* index into PF struct */ + u16 lan_vsi_id; /* ID as used by firmware */ u8 num_queue_pairs; /* num of qps assigned to VF vsis */ u64 num_mdd_events; /* num of mdd events detected */ -- cgit v1.2.3 From a3d772a3925d85721ad8518db14603fb1cd99295 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 23 Dec 2015 12:05:47 -0800 Subject: i40e: fix write-back-on-itr to work with legacy itr We were not doing write-back on interrupt throttle for Legacy case in X722. This patch fixes that, so we do WB_ON_ITR for Legacy as well. Plus the issue that we should still be setting NO_ITR if we are touching the DYN_CTLN register since we do not want to change ITR setting here. 
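Because the diff below is collapsed onto a single line, a condensed sketch of the patched write-back path may be easier to follow. This is illustrative only: the helper name is made up for the sketch, and the register macros and fields are taken from the flattened diff that follows.

static void i40e_force_wb_sketch(struct i40e_vsi *vsi,
				 struct i40e_q_vector *q_vector)
{
	u32 val;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
		/* MSI-X: per-vector register, ITR index set to "no ITR"
		 * so the current throttle setting is left untouched
		 */
		val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTLN_ITR_INDX_MASK;
		wr32(&vsi->back->hw,
		     I40E_PFINT_DYN_CTLN(q_vector->v_idx +
					 vsi->base_vector - 1), val);
	} else {
		/* Legacy/MSI: shared register, same "no ITR" treatment */
		val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
		      I40E_PFINT_DYN_CTL0_ITR_INDX_MASK;
		wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
	}
	q_vector->arm_wb_state = true;
}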
Change-ID: I5db8491ee1544118a389db839cecc93e1bbc480e Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 18 +++++++++++++----- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3 ++- 2 files changed, 15 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 47bd8b3145a7..6234136fe052 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -789,12 +789,20 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) if (q_vector->arm_wb_state) return; - val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK; + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + + vsi->base_vector - 1), + val); + } else { + val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ - wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), - val); + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); + } q_vector->arm_wb_state = true; } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 7d663fb61927..3e692352f38d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -307,7 +307,8 @@ static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector if (q_vector->arm_wb_state) return; - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK; + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ wr32(&vsi->back->hw, I40E_VFINT_DYN_CTLN1(q_vector->v_idx + -- cgit v1.2.3 From 1d0a4ada8401c73b360fc8492064a1107fca019b Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 23 Dec 2015 12:05:48 -0800 Subject: i40e: add counter for arq overflows Sometimes, ARQ overflows are a big deal and tell us that the firmware/hardware/driver/something is having problems. But normally they're no big deal. To assist in assessing this, add a counter to our Ethtool stats. A handful of ARQ overflows during VF init is no problem. A large, ever-growing number indicates that Something Bad is happening. 
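Since the diff below is flattened, here is a condensed sketch of the counter plumbing; the identifiers are the ones used in the patch, and the surrounding code is elided. Once applied, the value appears in "ethtool -S <ifname>" output under the name arq_overflows.

/* New per-PF counter in struct i40e_pf: */
u32 arq_overflows;	/* not fatal, possibly indicative of problems */

/* Exposed through the existing ethtool PF stats table: */
I40E_PF_STAT("arq_overflows", arq_overflows),

/* Incremented while servicing the admin queue when the overflow bit
 * is seen, in i40e_clean_adminq_subtask():
 */
if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
	if (hw->debug_mask & I40E_DEBUG_AQ)
		dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
	val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
	pf->arq_overflows++;
}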
Change-ID: Ie5348bfbc8a54a890559cb00279c28d976a55096 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 3 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 53ed3bdd8363..944dee98c4ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -393,6 +393,7 @@ struct i40e_pf { struct i40e_vf *vf; int num_alloc_vfs; /* actual number of VFs allocated */ u32 vf_aq_requests; + u32 arq_overflows; /* Not fatal, possibly indicative of problems */ /* DCBx/DCBNL capability for PF that indicates * whether DCBx is managed by firmware or host diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 45495911c5a4..812c95493413 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -143,6 +143,7 @@ static struct i40e_stats i40e_gstrings_stats[] = { I40E_PF_STAT("rx_oversize", stats.rx_oversize), I40E_PF_STAT("rx_jabber", stats.rx_jabber), I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests), + I40E_PF_STAT("arq_overflows", arq_overflows), I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt), I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match), diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 320b0491abd9..7323e322071a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6248,6 +6248,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) if (hw->debug_mask & I40E_DEBUG_AQ) dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK; + pf->arq_overflows++; } if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) { if (hw->debug_mask & I40E_DEBUG_AQ) -- cgit v1.2.3 From 07f169c3e9df898afe951d74734a0e54ac89ee9d Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 23 Dec 2015 12:05:49 -0800 Subject: i40e: add 20G speed for Tx bandwidth calculations When calculating TX bandwidth for VFs, we need to know the link speed to make sure we don't allocate more bandwidth than is available. Add 20G link speed to the switch statement so we can support devices that link at that speed. 
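A minimal sketch of the speed mapping this change touches, reformatted from the flattened diff below for readability; the helper name and enum type are assumptions based on the driver's naming, and only the cases visible in the diff are shown.

static int i40e_vf_link_speed_mbps_sketch(enum i40e_aq_link_speed link_speed)
{
	switch (link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_20GB:
		return 20000;	/* case added by this patch */
	case I40E_LINK_SPEED_10GB:
		return 10000;
	default:
		return 0;	/* unknown speed: no bandwidth to grant */
	}
}

The caller can then reject a requested VF max_tx_rate that exceeds the value returned for the current link speed.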
Change-ID: I5409f6139d549e5832777db9c22ca0664e0c5f8b Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 659d78270fdb..1635c7a0e505 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2306,6 +2306,9 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, case I40E_LINK_SPEED_40GB: speed = 40000; break; + case I40E_LINK_SPEED_20GB: + speed = 20000; + break; case I40E_LINK_SPEED_10GB: speed = 10000; break; -- cgit v1.2.3 From dd54a1ada9cea9275c53c89ad869214be6e9731a Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 23 Dec 2015 12:05:50 -0800 Subject: i40e: refactor DCB function This is a simple refactor suggested by the community to change a multi-level if statement into a switch. Change-ID: I831cf3c40426022220aa9b43990022d22dfd50db Signed-off-by: Jesse Brandeburg Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_dcb.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb.c b/drivers/net/ethernet/intel/i40e/i40e_dcb.c index 582daa7ad776..0fab3a9b51d9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb.c @@ -380,17 +380,20 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, { u16 length, typelength, offset = 0; struct i40e_cee_app_prio *app; - u8 i, up, selector; + u8 i; typelength = ntohs(tlv->hdr.typelen); length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >> I40E_LLDP_TLV_LEN_SHIFT); dcbcfg->numapps = length / sizeof(*app); + if (!dcbcfg->numapps) return; for (i = 0; i < dcbcfg->numapps; i++) { + u8 up, selector; + app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset); for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) { if (app->prio_map & BIT(up)) @@ -400,13 +403,17 @@ static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv, /* Get Selector from lower 2 bits, and convert to IEEE */ selector = (app->upper_oui_sel & I40E_CEE_APP_SELECTOR_MASK); - if (selector == I40E_CEE_APP_SEL_ETHTYPE) + switch (selector) { + case I40E_CEE_APP_SEL_ETHTYPE: dcbcfg->app[i].selector = I40E_APP_SEL_ETHTYPE; - else if (selector == I40E_CEE_APP_SEL_TCPIP) + break; + case I40E_CEE_APP_SEL_TCPIP: dcbcfg->app[i].selector = I40E_APP_SEL_TCPIP; - else + break; + default: /* Keep selector as it is for unknown types */ dcbcfg->app[i].selector = selector; + } dcbcfg->app[i].protocolid = ntohs(app->protocol); /* Move to next app */ -- cgit v1.2.3 From 1d73b2db4b9f4c12d42451efd09887ff7b37b02d Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:51 -0800 Subject: i40e: add a little more to an NVM update debug message Add a little more detail to an NVM update debug message in order to see the full ethtool request data. 
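Reformatted from the flattened diff below, the extended debug call looks like this (sketch only; surrounding function context omitted):

i40e_debug(hw, I40E_DEBUG_NVM,
	   "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
	   i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state,
	   hw->aq.nvm_release_on_done,
	   cmd->command, cmd->config, cmd->offset, cmd->data_size);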
Change-ID: Iab10437cb32d6fddc67ee347e7c0b42511e152cd Signed-off-by: Shannon Nelson Acked-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 6100cdd9ad13..5730f8091e1b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -693,10 +693,11 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw, /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d\n", + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->aq.nvm_release_on_done); + hw->aq.nvm_release_on_done, + cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { *perrno = -EFAULT; -- cgit v1.2.3 From 509a447ae80c2e48b991fa7556a1b0d2a7ee8a51 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 23 Dec 2015 12:05:52 -0800 Subject: i40evf: enable bus master after reset If the VF is reset via VFLR, the device will be knocked out of bus master mode, and the driver will fail to recover from the reset. Fix this by enabling bus mastering after every reset. In a non-VFLR case, the bus master bit will not be disabled, and this call will have no effect. Change-ID: Id515859ac7a691db478222228add6d149e96801a Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 66964eb6b7de..933f1fc5dfbb 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1837,6 +1837,7 @@ static void i40evf_reset_task(struct work_struct *work) break; msleep(I40EVF_RESET_WAIT_MS); } + pci_set_master(adapter->pdev); /* extra wait to make sure minimum wait is met */ msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { -- cgit v1.2.3 From de1017f76a9ba9c354e238ad94c88725a3688dce Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 23 Dec 2015 12:05:53 -0800 Subject: i40e: add netdev info to VSI dump Add a few more bits of netdev data into the debugfs output for dump VSI. For now, we'll add the features, hw_features, vlan_features, and flags bitflags and the state. More could be added later if needed. Also, tweak a couple nearby output lines for output readability. 
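For readability of the flattened diff below, a condensed sketch of the added netdev portion of the dump-VSI debugfs output; field names are as in the patch, and the rest of i40e_dbg_dump_vsi_seid() is elided.

if (vsi->netdev) {
	struct net_device *nd = vsi->netdev;

	dev_info(&pf->pdev->dev,
		 " netdev: name = %s, state = %lu, flags = 0x%08x\n",
		 nd->name, nd->state, nd->flags);
	dev_info(&pf->pdev->dev, " features = 0x%08lx\n",
		 (unsigned long int)nd->features);
	dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n",
		 (unsigned long int)nd->hw_features);
	dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
		 (unsigned long int)nd->vlan_features);
}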
Change-ID: I9fb5a9da75c9ad7679498ce9ac3ba24d065ddd2e Signed-off-by: Shannon Nelson Reviewed-by: Brandeburg, Jesse Reviewed-by: Wyborny, Carolyn Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 10744a698d6f..7aae0561c9c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -379,19 +379,27 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) return; } dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); - if (vsi->netdev) - dev_info(&pf->pdev->dev, - " netdev: name = %s\n", - vsi->netdev->name); + if (vsi->netdev) { + struct net_device *nd = vsi->netdev; + + dev_info(&pf->pdev->dev, " netdev: name = %s, state = %lu, flags = 0x%08x\n", + nd->name, nd->state, nd->flags); + dev_info(&pf->pdev->dev, " features = 0x%08lx\n", + (unsigned long int)nd->features); + dev_info(&pf->pdev->dev, " hw_features = 0x%08lx\n", + (unsigned long int)nd->hw_features); + dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n", + (unsigned long int)nd->vlan_features); + } if (vsi->active_vlans) dev_info(&pf->pdev->dev, " vlgrp: & = %p\n", vsi->active_vlans); dev_info(&pf->pdev->dev, - " netdev_registered = %i, current_netdev_flags = 0x%04x, state = %li flags = 0x%08lx\n", - vsi->netdev_registered, - vsi->current_netdev_flags, vsi->state, vsi->flags); + " state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n", + vsi->state, vsi->flags, + vsi->netdev_registered, vsi->current_netdev_flags); if (vsi == pf->vsi[pf->lan_vsi]) - dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", + dev_info(&pf->pdev->dev, " MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", pf->hw.mac.addr, pf->hw.mac.san_addr, pf->hw.mac.port_addr); -- cgit v1.2.3 From 2eccf1d611c09edb80cf5805041a34805a3e3b43 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 23 Dec 2015 12:05:54 -0800 Subject: i40e: remove VF device IDs from PF The PF doesn't need to know about the VF's device IDs, so remove them. 
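A minimal sketch of the resulting MAC-type selection in the PF, assuming the switch structure from the flattened diff below; the elided cases are the PF's own XL710/X710-family device IDs, and with the VF IDs removed those now simply fall through to the generic type.

switch (hw->device_id) {
/* ... XL710/X710-family PF device IDs elided ... */
case I40E_DEV_ID_KX_X722:
case I40E_DEV_ID_QSFP_X722:
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
	hw->mac.type = I40E_MAC_X722;
	break;
default:
	/* VF device IDs were matched above before this patch; they now
	 * take the generic path like any other unknown ID
	 */
	hw->mac.type = I40E_MAC_GENERIC;
	break;
}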
Change-ID: I62cf0e0fffa1ace586e58e00bc271b10ae440f05 Signed-off-by: Jesse Brandeburg Acked-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 8 -------- drivers/net/ethernet/intel/i40e/i40e_devids.h | 4 ---- 2 files changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 3b03a3165ca7..9018f286e30c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -62,14 +62,6 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_10G_BASE_T_X722: hw->mac.type = I40E_MAC_X722; break; - case I40E_DEV_ID_X722_VF: - case I40E_DEV_ID_X722_VF_HV: - hw->mac.type = I40E_MAC_X722_VF; - break; - case I40E_DEV_ID_VF: - case I40E_DEV_ID_VF_HV: - hw->mac.type = I40E_MAC_VF; - break; default: hw->mac.type = I40E_MAC_GENERIC; break; diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h index f7ce5c7c9003..99257fcd1ef4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_devids.h +++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h @@ -39,15 +39,11 @@ #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_20G_KR2_A 0x1588 #define I40E_DEV_ID_10G_BASE_T4 0x1589 -#define I40E_DEV_ID_VF 0x154C -#define I40E_DEV_ID_VF_HV 0x1571 #define I40E_DEV_ID_KX_X722 0x37CE #define I40E_DEV_ID_QSFP_X722 0x37CF #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 -#define I40E_DEV_ID_X722_VF 0x37CD -#define I40E_DEV_ID_X722_VF_HV 0x37D9 #define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \ (d) == I40E_DEV_ID_QSFP_B || \ -- cgit v1.2.3 From d17038d6872b02df984d3be453b732c833d282df Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 23 Dec 2015 12:05:55 -0800 Subject: i40e: trivial: remove unnecessary local var Probe routine already has too many locals, just convert one used for kzalloc into a kcalloc, eliminating the local. Change-ID: I349049872b71f858cbeb91ad7836e6767fc7b7d1 Signed-off-by: Jesse Brandeburg Reviewed-by: Anjali Singhai Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 7323e322071a..a2643fe8c507 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10637,7 +10637,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) u16 wol_nvm_bits; u16 link_status; int err; - u32 len; u32 val; u32 i; u8 set_fc_aq_fail; @@ -10896,8 +10895,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */ - len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; - pf->vsi = kzalloc(len, GFP_KERNEL); + pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *), + GFP_KERNEL); if (!pf->vsi) { err = -ENOMEM; goto err_switch_setup; -- cgit v1.2.3 From 82f399c9350dd2531acaf2fb25a701f58a49f936 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Wed, 23 Dec 2015 12:05:56 -0800 Subject: i40e/i40evf: Bump i40e to 1.4.11 and i40evf to 1.4.7 Bump. 
Change-ID: I21aa520a3c8c5f4f562a98019bf8b76b3706c480 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a2643fe8c507..9978c39f5940 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -51,7 +51,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 10 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 933f1fc5dfbb..ae6c6e11af83 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 4 +#define DRV_VERSION_BUILD 7 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From e4c6734eaab90695db0ea8456307790cb0c1ccb5 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Feb 2016 21:16:15 -0800 Subject: net: rework ndo tc op to consume additional qdisc handle parameter The ndo_setup_tc() op was added to support drivers offloading tx qdiscs however only support for mqprio was ever added. So we only ever added support for passing the number of traffic classes to the driver. This patch generalizes the ndo_setup_tc op so that a handle can be provided to indicate if the offload is for ingress or egress or potentially even child qdiscs. CC: Murali Karicheri CC: Shradha Shah CC: Or Gerlitz CC: Ariel Elior CC: Jeff Kirsher CC: Bruce Allan CC: Jesse Brandeburg CC: Don Skidmore Signed-off-by: John Fastabend Acked-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 5 ++++- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7 +++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 1 + drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 ++++- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 10 +++++++++- drivers/net/ethernet/intel/i40e/i40e.h | 2 +- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_main.c | 17 ++++++++++++----- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 11 ++++++++++- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 12 ++++++++++-- drivers/net/ethernet/sfc/efx.h | 2 +- drivers/net/ethernet/sfc/tx.c | 5 ++++- drivers/net/ethernet/ti/netcp_core.c | 5 ++++- include/linux/netdevice.h | 3 ++- net/sched/sch_mqprio.c | 5 +++-- 16 files changed, 74 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 8a9b493566c9..9955cae3cabc 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1626,12 +1626,15 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u8 tc) +static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u8 tc) { struct xgbe_prv_data *pdata = netdev_priv(netdev); unsigned int offset, queue; u8 i; + if (handle != TC_H_ROOT) + return -EINVAL; + if (tc && (tc != pdata->hw_feat.tc_cnt)) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 9e42bcaf9917..b262cba34dfa 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4272,6 +4272,13 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u8 num_tc) +{ + if (handle != TC_H_ROOT) + return -EINVAL; + return bnx2x_setup_tc(dev, num_tc); +} + /* called with rtnl_lock */ int bnx2x_change_mac_addr(struct net_device *dev, void *p) { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 37369865ca6d..60a4109dcdeb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -486,6 +486,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u8 num_tc); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c5845252c920..81fc51c4ec2b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13061,7 +13061,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = poll_bnx2x, #endif - .ndo_setup_tc = bnx2x_setup_tc, + .ndo_setup_tc = __bnx2x_setup_tc, #ifdef CONFIG_BNX2X_SRIOV .ndo_set_vf_mac = bnx2x_set_vf_mac, .ndo_set_vf_vlan = bnx2x_set_vf_vlan, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5dc89e527e7d..ff08faf44ee5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ 
b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5370,10 +5370,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u8 tc) +static int bnxt_setup_tc(struct net_device *dev, u32 handle, u8 tc) { struct bnxt *bp = netdev_priv(dev); + if (handle != TC_H_ROOT) + return -EINVAL; + if (tc > bp->max_tc) { netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", tc, bp->max_tc); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 662569d5b7c0..12701a492325 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1204,6 +1204,14 @@ err_queueing_scheme: return err; } +static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u8 tc) +{ + if (handle != TC_H_ROOT) + return -EINVAL; + + return fm10k_setup_tc(dev, tc); +} + static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { switch (cmd) { @@ -1386,7 +1394,7 @@ static const struct net_device_ops fm10k_netdev_ops = { .ndo_vlan_rx_kill_vid = fm10k_vlan_rx_kill_vid, .ndo_set_rx_mode = fm10k_set_rx_mode, .ndo_get_stats64 = fm10k_get_stats64, - .ndo_setup_tc = fm10k_setup_tc, + .ndo_setup_tc = __fm10k_setup_tc, .ndo_set_vf_mac = fm10k_ndo_set_vf_mac, .ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan, .ndo_set_vf_rate = fm10k_ndo_set_vf_bw, diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 53ed3bdd8363..ef9ca075d5e5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -788,7 +788,7 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE int i40e_close(struct net_device *netdev); -int i40e_setup_tc(struct net_device *netdev, u8 tc); +int __i40e_setup_tc(struct net_device *netdev, u32 handle, u8 tc); void i40e_netpoll(struct net_device *netdev); int i40e_fcoe_enable(struct net_device *netdev); int i40e_fcoe_disable(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 579a46ca82df..7c66ce416ec7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1457,7 +1457,7 @@ static const struct net_device_ops i40e_fcoe_netdev_ops = { .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, - .ndo_setup_tc = i40e_setup_tc, + .ndo_setup_tc = __i40e_setup_tc, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 320b0491abd9..abcb6c152186 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5253,11 +5253,7 @@ void i40e_down(struct i40e_vsi *vsi) * @netdev: net device to configure * @tc: number of traffic classes to enable **/ -#ifdef I40E_FCOE -int i40e_setup_tc(struct net_device *netdev, u8 tc) -#else static int i40e_setup_tc(struct net_device *netdev, u8 tc) -#endif { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -5310,6 +5306,17 @@ exit: return ret; } +#ifdef I40E_FCOE +int __i40e_setup_tc(struct net_device *netdev, u32 handle, u8 tc) +#else +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, u8 tc) +#endif +{ + if (handle 
!= TC_H_ROOT) + return -EINVAL; + return i40e_setup_tc(netdev, tc); +} + /** * i40e_open - Called when a network interface is made active * @netdev: network interface device structure @@ -8951,7 +8958,7 @@ static const struct net_device_ops i40e_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = i40e_netpoll, #endif - .ndo_setup_tc = i40e_setup_tc, + .ndo_setup_tc = __i40e_setup_tc, #ifdef I40E_FCOE .ndo_fcoe_enable = i40e_fcoe_enable, .ndo_fcoe_disable = i40e_fcoe_disable, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0c701b8438b6..1ba714efd78c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8200,6 +8200,15 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u8 tc) +{ + /* Only support egress tc setup for now */ + if (handle != TC_H_ROOT) + return -EINVAL; + + return ixgbe_setup_tc(dev, tc); +} + #ifdef CONFIG_PCI_IOV void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) { @@ -8658,7 +8667,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB - .ndo_setup_tc = ixgbe_setup_tc, + .ndo_setup_tc = __ixgbe_setup_tc, #endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 0c7e3f69a73b..d5c6c16b9457 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -69,6 +69,14 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } +static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, u8 up) +{ + if (handle != TC_H_ROOT) + return -EINVAL; + + return mlx4_en_setup_tc(dev, up); +} + #ifdef CONFIG_RFS_ACCEL struct mlx4_en_filter { @@ -2466,7 +2474,7 @@ static const struct net_device_ops mlx4_netdev_ops = { #endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, - .ndo_setup_tc = mlx4_en_setup_tc, + .ndo_setup_tc = __mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif @@ -2504,7 +2512,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = { #endif .ndo_set_features = mlx4_en_set_features, .ndo_fix_features = mlx4_en_fix_features, - .ndo_setup_tc = mlx4_en_setup_tc, + .ndo_setup_tc = __mlx4_en_setup_tc, #ifdef CONFIG_RFS_ACCEL .ndo_rx_flow_steer = mlx4_en_filter_rfs, #endif diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 10827476bc0b..7815fa09b15d 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,7 +32,7 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +int efx_setup_tc(struct net_device *net_dev, u32 handle, u8 num_tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index f7a0ec1bca97..8f1d53e2aca7 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -562,7 +562,7 @@ void 
efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u8 num_tc) +int efx_setup_tc(struct net_device *net_dev, u32 handle, u8 num_tc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; @@ -570,6 +570,9 @@ int efx_setup_tc(struct net_device *net_dev, u8 num_tc) unsigned tc; int rc; + if (handle != TC_H_ROOT) + return -EINVAL; + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index c61d66d38634..40cde814608b 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1835,13 +1835,16 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u8 num_tc) +static int netcp_setup_tc(struct net_device *dev, u32 handle, u8 num_tc) { int i; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); + if (handle != TC_H_ROOT) + return -EINVAL; + /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || (dev->real_num_tx_queues < num_tc)) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0499569c256d..48928b6f9cb6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -51,6 +51,7 @@ #include #include #include +#include struct netpoll_info; struct device; @@ -1150,7 +1151,7 @@ struct net_device_ops { int (*ndo_set_vf_rss_query_en)( struct net_device *dev, int vf, bool setting); - int (*ndo_setup_tc)(struct net_device *dev, u8 tc); + int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u8 tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index ad70ecf57ce7..f5a0e8a4dbd7 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -39,7 +39,7 @@ static void mqprio_destroy(struct Qdisc *sch) } if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc) - dev->netdev_ops->ndo_setup_tc(dev, 0); + dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0); else netdev_set_num_tc(dev, 0); } @@ -141,7 +141,8 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) */ if (qopt->hw) { priv->hw_owned = 1; - err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc); + err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, + qopt->num_tc); if (err) goto err; } else { -- cgit v1.2.3 From 16e5cc647173a97e33b3e3ba81f73eb455561794 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Feb 2016 21:16:43 -0800 Subject: net: rework setup_tc ndo op to consume general tc operand This patch updates setup_tc so we can pass additional parameters into the ndo op in a generic way. To do this we provide structured union and type flag. This lets each classifier and qdisc provide its own set of attributes without having to add new ndo ops or grow the signature of the callback. Signed-off-by: John Fastabend Acked-by: Jiri Pirko Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 9 ++++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 7 ++++--- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 3 ++- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 ++++++-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 7 ++++--- drivers/net/ethernet/intel/i40e/i40e.h | 3 ++- drivers/net/ethernet/intel/i40e/i40e_main.c | 10 ++++++---- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 ++++--- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 7 ++++--- drivers/net/ethernet/sfc/efx.h | 3 ++- drivers/net/ethernet/sfc/tx.c | 9 ++++++--- drivers/net/ethernet/ti/netcp_core.c | 13 +++++++------ include/linux/netdevice.h | 20 +++++++++++++++++++- net/sched/sch_mqprio.c | 9 ++++++--- 14 files changed, 78 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 9955cae3cabc..cfd3f7efda1c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1626,15 +1626,18 @@ static void xgbe_poll_controller(struct net_device *netdev) } #endif /* End CONFIG_NET_POLL_CONTROLLER */ -static int xgbe_setup_tc(struct net_device *netdev, u32 handle, u8 tc) +static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc_to_netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); unsigned int offset, queue; - u8 i; + u8 i, tc; - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO) return -EINVAL; + tc = tc_to_netdev->tc; + if (tc && (tc != pdata->hw_feat.tc_cnt)) return -EINVAL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b262cba34dfa..45843d150868 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4272,11 +4272,12 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) return 0; } -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u8 num_tc) +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return bnx2x_setup_tc(dev, num_tc); + return bnx2x_setup_tc(dev, tc->tc); } /* called with rtnl_lock */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 60a4109dcdeb..0e68fadecfdb 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -486,7 +486,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); /* setup_tc callback */ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc); -int __bnx2x_setup_tc(struct net_device *dev, u32 handle, u8 num_tc); +int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); int bnx2x_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ff08faf44ee5..169920aa39f3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5370,13 +5370,17 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu) return 0; } -static int bnxt_setup_tc(struct net_device *dev, u32 handle, u8 tc) +static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 
proto, + struct tc_to_netdev *ntc) { struct bnxt *bp = netdev_priv(dev); + u8 tc; - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO) return -EINVAL; + tc = ntc->tc; + if (tc > bp->max_tc) { netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n", tc, bp->max_tc); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index 12701a492325..dc1a82148ff0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1204,12 +1204,13 @@ err_queueing_scheme: return err; } -static int __fm10k_setup_tc(struct net_device *dev, u32 handle, u8 tc) +static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return fm10k_setup_tc(dev, tc); + return fm10k_setup_tc(dev, tc->tc); } static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index ef9ca075d5e5..933c4b3d92c8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -788,7 +788,8 @@ struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr, bool is_vf, bool is_netdev); #ifdef I40E_FCOE int i40e_close(struct net_device *netdev); -int __i40e_setup_tc(struct net_device *netdev, u32 handle, u8 tc); +int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); void i40e_netpoll(struct net_device *netdev); int i40e_fcoe_enable(struct net_device *netdev); int i40e_fcoe_disable(struct net_device *netdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index abcb6c152186..257d16207976 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5307,14 +5307,16 @@ exit: } #ifdef I40E_FCOE -int __i40e_setup_tc(struct net_device *netdev, u32 handle, u8 tc) +int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) #else -static int __i40e_setup_tc(struct net_device *netdev, u32 handle, u8 tc) +static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) #endif { - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return i40e_setup_tc(netdev, tc); + return i40e_setup_tc(netdev, tc->tc); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 1ba714efd78c..dca2298f4c36 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8200,13 +8200,14 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } -int __ixgbe_setup_tc(struct net_device *dev, u32 handle, u8 tc) +int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { /* Only support egress tc setup for now */ - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return ixgbe_setup_tc(dev, tc); + return ixgbe_setup_tc(dev, tc->tc); } #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d5c6c16b9457..01d6a9695586 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -69,12 +69,13 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) return 0; } -static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, u8 up) +static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; - return mlx4_en_setup_tc(dev, up); + return mlx4_en_setup_tc(dev, tc->tc); } #ifdef CONFIG_RFS_ACCEL diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 7815fa09b15d..5e3f93f04e62 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -32,7 +32,8 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); -int efx_setup_tc(struct net_device *net_dev, u32 handle, u8 num_tc); +int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + struct tc_to_netdev *tc); unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_piobuf_size; extern bool efx_separate_tx_channels; diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 8f1d53e2aca7..2cdb5718ed66 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -562,17 +562,20 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) efx->n_tx_channels : 0)); } -int efx_setup_tc(struct net_device *net_dev, u32 handle, u8 num_tc) +int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, + struct tc_to_netdev *ntc) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; struct efx_tx_queue *tx_queue; - unsigned tc; + unsigned tc, num_tc; int rc; - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO) return -EINVAL; + num_tc = ntc->tc; + if (efx_nic_rev(efx) < EFX_REV_FALCON_B0 || num_tc > EFX_MAX_TX_TC) return -EINVAL; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 40cde814608b..8586a2034019 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1835,25 +1835,26 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, return 0; } -static int netcp_setup_tc(struct net_device *dev, u32 handle, u8 num_tc) +static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, + struct tc_to_netdev tc) { int i; /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (handle != TC_H_ROOT) + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; /* Sanity-check the number of traffic classes requested */ if ((dev->real_num_tx_queues <= 1) || - (dev->real_num_tx_queues < num_tc)) + (dev->real_num_tx_queues < tc->tc)) return -EINVAL; /* Configure traffic class to queue mappings */ - if (num_tc) { - netdev_set_num_tc(dev, num_tc); - for (i = 0; i < num_tc; i++) + if (tc->tc) { + netdev_set_num_tc(dev, tc->tc); + for (i = 0; i < tc->tc; i++) netdev_set_tc_queue(dev, i, 1, i); } else { netdev_reset_tc(dev); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 48928b6f9cb6..e396060f815f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -779,6 +779,21 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a, 
typedef u16 (*select_queue_fallback_t)(struct net_device *dev, struct sk_buff *skb); +/* This structure holds attributes of qdisc and classifiers + * that are being passed to the netdevice through the setup_tc op. + */ +enum { + TC_SETUP_MQPRIO, +}; + +struct tc_to_netdev { + unsigned int type; + union { + u8 tc; + }; +}; + + /* * This structure defines the management hooks for network devices. * The following hooks can be defined; unless noted otherwise, they are @@ -1151,7 +1166,10 @@ struct net_device_ops { int (*ndo_set_vf_rss_query_en)( struct net_device *dev, int vf, bool setting); - int (*ndo_setup_tc)(struct net_device *dev, u32 handle, u8 tc); + int (*ndo_setup_tc)(struct net_device *dev, + u32 handle, + __be16 protocol, + struct tc_to_netdev *tc); #if IS_ENABLED(CONFIG_FCOE) int (*ndo_fcoe_enable)(struct net_device *dev); int (*ndo_fcoe_disable)(struct net_device *dev); diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index f5a0e8a4dbd7..f9947d1f4952 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -28,6 +28,7 @@ static void mqprio_destroy(struct Qdisc *sch) { struct net_device *dev = qdisc_dev(sch); struct mqprio_sched *priv = qdisc_priv(sch); + struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO}; unsigned int ntx; if (priv->qdiscs) { @@ -39,7 +40,7 @@ static void mqprio_destroy(struct Qdisc *sch) } if (priv->hw_owned && dev->netdev_ops->ndo_setup_tc) - dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0); + dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); else netdev_set_num_tc(dev, 0); } @@ -140,9 +141,11 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt) * supplied and verified mapping */ if (qopt->hw) { + struct tc_to_netdev tc = {.type = TC_SETUP_MQPRIO, + .tc = qopt->num_tc}; + priv->hw_owned = 1; - err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, - qopt->num_tc); + err = dev->netdev_ops->ndo_setup_tc(dev, sch->handle, 0, &tc); if (err) goto err; } else { -- cgit v1.2.3 From 9d35cf062e05be8b8b2b7dbc943cd95352cd90cb Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Feb 2016 21:18:28 -0800 Subject: net: ixgbe: add minimal parser details for ixgbe This adds an ixgbe data structure that is used to determine what headers:fields can be matched and in what order they are supported. For hardware devices this can be a bit tricky because typically only pre-programmed (firmware, ucode, rtl) parse graphs will be supported and we don't yet have an interface to change these from the OS. So its sort of a you get whatever your friendly vendor provides affair at the moment. In the future we can add the get routines and set routines to update this data structure. One interesting thing to note here is the data structure here identifies ethernet, ip, and tcp fields without having to hardcode them as enumerations or use other identifiers. Signed-off-by: John Fastabend Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 112 +++++++++++++++++++++++++ 1 file changed, 112 insertions(+) create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_model.h (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h new file mode 100644 index 000000000000..43ebec4362f5 --- /dev/null +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -0,0 +1,112 @@ +/******************************************************************************* + * + * Intel 10 Gigabit PCI Express Linux drive + * Copyright(c) 2013 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along + * with this program. If not, see . + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +#ifndef _IXGBE_MODEL_H_ +#define _IXGBE_MODEL_H_ + +#include "ixgbe.h" +#include "ixgbe_type.h" + +struct ixgbe_mat_field { + unsigned int off; + unsigned int mask; + int (*val)(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + __u32 val, __u32 m); + unsigned int type; +}; + +static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + __u32 val, __u32 m) +{ + input->filter.formatted.src_ip[0] = val; + mask->formatted.src_ip[0] = m; + return 0; +} + +static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + __u32 val, __u32 m) +{ + input->filter.formatted.dst_ip[0] = val; + mask->formatted.dst_ip[0] = m; + return 0; +} + +static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { + { .off = 12, .mask = -1, .val = ixgbe_mat_prgm_sip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .off = 16, .mask = -1, .val = ixgbe_mat_prgm_dip, + .type = IXGBE_ATR_FLOW_TYPE_IPV4}, + { .val = NULL } /* terminal node */ +}; + +static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + __u32 val, __u32 m) +{ + input->filter.formatted.src_port = val & 0xffff; + mask->formatted.src_port = m & 0xffff; + return 0; +}; + +static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input, + union ixgbe_atr_input *mask, + __u32 val, __u32 m) +{ + input->filter.formatted.dst_port = val & 0xffff; + mask->formatted.dst_port = m & 0xffff; + return 0; +}; + +static struct ixgbe_mat_field ixgbe_tcp_fields[] = { + {.off = 0, .mask = 0xffff, .val = ixgbe_mat_prgm_sport, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + {.off = 2, .mask = 0xffff, .val = ixgbe_mat_prgm_dport, + .type = IXGBE_ATR_FLOW_TYPE_TCPV4}, + { .val = NULL } /* terminal node */ +}; + +struct ixgbe_nexthdr { + /* offset, shift, and mask of position to next header */ + unsigned int o; + __u32 s; + __u32 m; + /* match criteria to make this jump*/ + unsigned int off; + __u32 val; + __u32 mask; + /* 
location of jump to make */ + struct ixgbe_mat_field *jump; +}; + +static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = { + { .o = 0, .s = 6, .m = 0xf, + .off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields}, + { .jump = NULL } /* terminal node */ +}; +#endif /* _IXGBE_MODEL_H_ */ -- cgit v1.2.3 From b82b17d929a692df1122fedc0ff4ddcef9cb6ad4 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Feb 2016 21:18:53 -0800 Subject: net: ixgbe: add support for tc_u32 offload This adds initial support for offloading the u32 tc classifier. This initial implementation only implements a few base matches and actions to illustrate the use of the infrastructure patches. However it is an interesting subset because it handles the u32 next hdr logic to correctly map tcp packets from ip headers using the ihl and protocol fields. After this is accepted we can extend the match and action fields easily by updating the model header file. Also only the drop action is supported initially. Here is a short test script, #tc qdisc add dev eth4 ingress #tc filter add dev eth4 parent ffff: protocol ip \ u32 ht 800: order 1 \ match ip dst 15.0.0.1/32 match ip src 15.0.0.2/32 action drop <-- hardware has dst/src ip match rule installed --> #tc filter del dev eth4 parent ffff: prio 49152 #tc filter add dev eth4 parent ffff: protocol ip prio 99 \ handle 1: u32 divisor 1 #tc filter add dev eth4 protocol ip parent ffff: prio 99 \ u32 ht 800: order 1 link 1: \ offset at 0 mask 0f00 shift 6 plus 0 eat match ip protocol 6 ff #tc filter add dev eth4 parent ffff: protocol ip \ u32 ht 1: order 3 match tcp src 23 ffff action drop <-- hardware has tcp src port rule installed --> #tc qdisc del dev eth4 parent ffff: <-- hardware cleaned up --> Signed-off-by: John Fastabend Acked-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 6 + drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 6 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 231 ++++++++++++++++++++--- 3 files changed, 213 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4b9156cd8b93..fc877c715c64 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -796,6 +796,9 @@ struct ixgbe_adapter { u8 default_up; unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ +#define IXGBE_MAX_LINK_HANDLE 10 + struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + /* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode */ @@ -925,6 +928,9 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, u16 soft_id); void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, union ixgbe_atr_input *mask); +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx); void ixgbe_set_rx_mode(struct net_device *netdev); #ifdef CONFIG_IXGBE_DCB void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index bea96b3bc90c..726e0eeee63b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2520,9 +2520,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return ret; } -static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, - struct ixgbe_fdir_filter *input, - u16 sw_idx) +int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_fdir_filter *input, + u16 sw_idx) { struct ixgbe_hw *hw = &adapter->hw; struct hlist_node *node2; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index dca2298f4c36..abdfaead1ba1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -51,6 +51,8 @@ #include #include #include +#include +#include #ifdef CONFIG_OF #include @@ -65,6 +67,7 @@ #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" #include "ixgbe_sriov.h" +#include "ixgbe_model.h" char ixgbe_driver_name[] = "ixgbe"; static const char ixgbe_driver_string[] = @@ -5545,6 +5548,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) #endif /* CONFIG_IXGBE_DCB */ #endif /* IXGBE_FCOE */ + /* initialize static ixgbe jump table entries */ + adapter->jump_tables[0] = ixgbe_ipv4_fields; + adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * hw->mac.num_rar_entries, GFP_ATOMIC); @@ -8200,10 +8206,191 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) return 0; } +static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + int err; + + spin_lock(&adapter->fdir_perfect_lock); + err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, cls->knode.handle); + spin_unlock(&adapter->fdir_perfect_lock); + return err; +} + +static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + u32 loc = cls->knode.handle & 0xfffff; + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_mat_field *field_ptr; + struct ixgbe_fdir_filter *input; + union ixgbe_atr_input mask; +#ifdef 
CONFIG_NET_CLS_ACT + const struct tc_action *a; +#endif + int i, err = 0; + u8 queue; + u32 handle; + + memset(&mask, 0, sizeof(union ixgbe_atr_input)); + handle = cls->knode.handle; + + /* At the moment cls_u32 jumps to transport layer and skips past + * L2 headers. The canonical method to match L2 frames is to use + * negative values. However this is error prone at best but really + * just broken because there is no way to "know" what sort of hdr + * is in front of the transport layer. Fix cls_u32 to support L2 + * headers when needed. + */ + if (protocol != htons(ETH_P_IP)) + return -EINVAL; + + if (cls->knode.link_handle || + cls->knode.link_handle >= IXGBE_MAX_LINK_HANDLE) { + struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; + u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle); + + for (i = 0; nexthdr[i].jump; i++) { + if (nexthdr->o != cls->knode.sel->offoff || + nexthdr->s != cls->knode.sel->offshift || + nexthdr->m != cls->knode.sel->offmask || + /* do not support multiple key jumps its just mad */ + cls->knode.sel->nkeys > 1) + return -EINVAL; + + if (nexthdr->off != cls->knode.sel->keys[0].off || + nexthdr->val != cls->knode.sel->keys[0].val || + nexthdr->mask != cls->knode.sel->keys[0].mask) + return -EINVAL; + + if (uhtid >= IXGBE_MAX_LINK_HANDLE) + return -EINVAL; + + adapter->jump_tables[uhtid] = nexthdr->jump; + } + return 0; + } + + if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) { + e_err(drv, "Location out of range\n"); + return -EINVAL; + } + + /* cls u32 is a graph starting at root node 0x800. The driver tracks + * links and also the fields used to advance the parser across each + * link (e.g. nexthdr/eat parameters from 'tc'). This way we can map + * the u32 graph onto the hardware parse graph denoted in ixgbe_model.h + * To add support for new nodes update ixgbe_model.h parse structures + * this function _should_ be generic try not to hardcode values here. 
+ */ + if (TC_U32_USERHTID(handle) == 0x800) { + field_ptr = adapter->jump_tables[0]; + } else { + if (TC_U32_USERHTID(handle) >= ARRAY_SIZE(adapter->jump_tables)) + return -EINVAL; + + field_ptr = adapter->jump_tables[TC_U32_USERHTID(handle)]; + } + + if (!field_ptr) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + for (i = 0; i < cls->knode.sel->nkeys; i++) { + int off = cls->knode.sel->keys[i].off; + __be32 val = cls->knode.sel->keys[i].val; + __be32 m = cls->knode.sel->keys[i].mask; + bool found_entry = false; + int j; + + for (j = 0; field_ptr[j].val; j++) { + if (field_ptr[j].off == off && + field_ptr[j].mask == m) { + field_ptr[j].val(input, &mask, val, m); + input->filter.formatted.flow_type |= + field_ptr[j].type; + found_entry = true; + break; + } + } + + if (!found_entry) + goto err_out; + } + + mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | + IXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK; + +#ifdef CONFIG_NET_CLS_ACT + if (list_empty(&cls->knode.exts->actions)) + goto err_out; + + list_for_each_entry(a, &cls->knode.exts->actions, list) { + if (!is_tcf_gact_shot(a)) + goto err_out; + } +#endif + + input->action = IXGBE_FDIR_DROP_QUEUE; + queue = IXGBE_FDIR_DROP_QUEUE; + input->sw_idx = loc; + + spin_lock(&adapter->fdir_perfect_lock); + + if (hlist_empty(&adapter->fdir_filter_list)) { + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); + err = ixgbe_fdir_set_input_mask_82599(hw, &mask); + if (err) + goto err_out_w_lock; + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { + err = -EINVAL; + goto err_out_w_lock; + } + + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter, + input->sw_idx, queue); + if (!err) + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + spin_unlock(&adapter->fdir_perfect_lock); + + return err; +err_out_w_lock: + spin_unlock(&adapter->fdir_perfect_lock); +err_out: + kfree(input); + return -EINVAL; +} + int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *tc) { - /* Only support egress tc setup for now */ + struct ixgbe_adapter *adapter = netdev_priv(dev); + + if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && + tc->type == TC_SETUP_CLSU32) { + if (!(dev->features & NETIF_F_HW_TC)) + return -EINVAL; + + switch (tc->cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return ixgbe_configure_clsu32(adapter, + proto, tc->cls_u32); + case TC_CLSU32_DELETE_KNODE: + return ixgbe_delete_clsu32(adapter, tc->cls_u32); + default: + return -EINVAL; + } + } + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) return -EINVAL; @@ -8272,19 +8459,17 @@ static int ixgbe_set_features(struct net_device *netdev, } /* - * Check if Flow Director n-tuple support was enabled or disabled. If - * the state changed, we need to reset. + * Check if Flow Director n-tuple support or hw_tc support was + * enabled or disabled. If the state changed, we need to reset. 
*/ - switch (features & NETIF_F_NTUPLE) { - case NETIF_F_NTUPLE: + if ((features & NETIF_F_NTUPLE) || (features & NETIF_F_HW_TC)) { /* turn off ATR, enable perfect filters and reset */ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) need_reset = true; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; - break; - default: + } else { /* turn off perfect filters, enable ATR and reset */ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) need_reset = true; @@ -8292,23 +8477,16 @@ static int ixgbe_set_features(struct net_device *netdev, adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; /* We cannot enable ATR if SR-IOV is enabled */ - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) - break; - - /* We cannot enable ATR if we have 2 or more traffic classes */ - if (netdev_get_num_tc(netdev) > 1) - break; - - /* We cannot enable ATR if RSS is disabled */ - if (adapter->ring_feature[RING_F_RSS].limit <= 1) - break; - - /* A sample rate of 0 indicates ATR disabled */ - if (!adapter->atr_sample_rate) - break; - - adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; - break; + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED || + /* We cannot enable ATR if we have 2 or more tcs */ + (netdev_get_num_tc(netdev) > 1) || + /* We cannot enable ATR if RSS is disabled */ + (adapter->ring_feature[RING_F_RSS].limit <= 1) || + /* A sample rate of 0 indicates ATR disabled */ + (!adapter->atr_sample_rate)) + ; /* do nothing not supported */ + else /* otherwise supported and set the flag */ + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; } if (features & NETIF_F_HW_VLAN_CTAG_RX) @@ -8667,9 +8845,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, -#ifdef CONFIG_IXGBE_DCB .ndo_setup_tc = __ixgbe_setup_tc, -#endif #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ixgbe_netpoll, #endif @@ -9040,7 +9216,8 @@ skip_sriov: case ixgbe_mac_X550EM_x: netdev->features |= NETIF_F_SCTP_CRC; netdev->hw_features |= NETIF_F_SCTP_CRC | - NETIF_F_NTUPLE; + NETIF_F_NTUPLE | + NETIF_F_HW_TC; break; default: break; -- cgit v1.2.3 From db956ae882f4e7aa99c9c242a91ae942d08b6939 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Tue, 16 Feb 2016 21:19:19 -0800 Subject: net: ixgbe: abort with cls u32 divisor groups greater than 1 This patch ensures ixgbe will not try to offload hash tables from the u32 module. The device class does not currently support this so until it is enabled just abort on these tables. Interestingly the more flexible your hardware is the less code you need to implement to guard against these cases. Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1 + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 31 +++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index fc877c715c64..84fa28ceb200 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -798,6 +798,7 @@ struct ixgbe_adapter { #define IXGBE_MAX_LINK_HANDLE 10 struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE]; + unsigned long tables; /* maximum number of RETA entries among all devices supported by ixgbe * driver: currently it's x550 device in non-SRIOV mode diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index abdfaead1ba1..cf4b729c92d7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8217,6 +8217,27 @@ static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter, return err; } +static int ixgbe_configure_clsu32_add_hnode(struct ixgbe_adapter *adapter, + __be16 protocol, + struct tc_cls_u32_offload *cls) +{ + /* This ixgbe devices do not support hash tables at the moment + * so abort when given hash tables. + */ + if (cls->hnode.divisor > 0) + return -EINVAL; + + set_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); + return 0; +} + +static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter, + struct tc_cls_u32_offload *cls) +{ + clear_bit(TC_U32_USERHTID(cls->hnode.handle), &adapter->tables); + return 0; +} + static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, __be16 protocol, struct tc_cls_u32_offload *cls) @@ -8251,6 +8272,9 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter, struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps; u32 uhtid = TC_U32_USERHTID(cls->knode.link_handle); + if (!test_bit(uhtid, &adapter->tables)) + return -EINVAL; + for (i = 0; nexthdr[i].jump; i++) { if (nexthdr->o != cls->knode.sel->offoff || nexthdr->s != cls->knode.sel->offshift || @@ -8386,6 +8410,13 @@ int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, proto, tc->cls_u32); case TC_CLSU32_DELETE_KNODE: return ixgbe_delete_clsu32(adapter, tc->cls_u32); + case TC_CLSU32_NEW_HNODE: + case TC_CLSU32_REPLACE_HNODE: + return ixgbe_configure_clsu32_add_hnode(adapter, proto, + tc->cls_u32); + case TC_CLSU32_DELETE_HNODE: + return ixgbe_configure_clsu32_del_hnode(adapter, + tc->cls_u32); default: return -EINVAL; } -- cgit v1.2.3 From f3699b3c5752216ec1529d6bd7d13d9b1a6d1b9d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:01 -0800 Subject: i40e: trivial: drop duplicate definition The probe routine already had a u32 val declared, no need to do it again. Found by W=2 compile. 
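For context, the duplicated declaration was the classic inner-block shadow: the probe routine already has a u32 val in the enclosing scope, and re-declaring it inside the if block is what the W=2 build complained about. A stand-alone illustration of the pattern (the function name and vfs_present flag are invented; the register accessors are the ones visible in the hunk that follows):

	static u32 example_disable_vf_link_irqs(struct i40e_hw *hw, bool vfs_present)
	{
		u32 val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);	/* outer declaration */

		if (vfs_present) {
			u32 val;	/* duplicate: shadows the outer 'val' */

			/* disable link interrupts for VFs */
			val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
			val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
			wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		}

		return val;
	}

Deleting the inner u32 val; simply reuses the variable already in scope, which is all the patch below does.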
Change-ID: Id7b65f6d0ef6bb71067d0557f5be0202b6d8741e Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6b158c404f43..90ad7d70bc0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11008,8 +11008,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && (pf->flags & I40E_FLAG_MSIX_ENABLED) && !test_bit(__I40E_BAD_EEPROM, &pf->state)) { - u32 val; - /* disable link interrupts for VFs */ val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM); val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK; -- cgit v1.2.3 From d89d967f7188438cf9a3ffb9ce74e9ecc2795619 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:02 -0800 Subject: i40e: trivial: fix missing space Missing space in comment, fixed. Change-ID: I8cdf3ce5994b4a97dcc3eeb33422533918546667 Reported-by: Sergei Shtylyov Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 90ad7d70bc0c..c4ad0f159537 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3754,7 +3754,7 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename) #ifdef CONFIG_NET_POLL_CONTROLLER /** - * i40e_netpoll - A Polling 'interrupt'handler + * i40e_netpoll - A Polling 'interrupt' handler * @netdev: network interface device structure * * This is used by netconsole to send skbs without having to re-enable -- cgit v1.2.3 From 3578fa0a8cd6d9fb03092c673f42aad0e7c850df Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:03 -0800 Subject: i40e: fix bug in dma sync Driver was using an offset based off a DMA handle while mapping and unmapping using sync_single_range_for[cpu|device], where it should be using DMA handle (returned from alloc_coherent) and the offset of the memory to be sync'd. 
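To make the calling convention concrete: dma_sync_single_range_for_{cpu,device}() take the DMA handle of the underlying allocation plus an offset into it, not a pre-offset per-buffer handle with offset 0. A minimal sketch (the pool structure and helper are invented and are not the driver's rx_bi layout):

	#include <linux/dma-mapping.h>

	struct hdr_pool {
		dma_addr_t dma;		/* handle for the whole allocation */
		size_t elem_len;	/* one fixed-size header buffer */
	};

	static void hdr_pool_sync_for_cpu(struct device *dev,
					  struct hdr_pool *pool, unsigned int i)
	{
		dma_sync_single_range_for_cpu(dev, pool->dma,		/* base handle */
					      i * pool->elem_len,	/* element offset */
					      pool->elem_len,
					      DMA_FROM_DEVICE);
	}

The rx_ring->rx_bi[0].dma plus i * rx_ring->rx_hdr_len arguments in the hunks that follow are exactly this pattern.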
Change-ID: I208256565b1595ff0e9171ab852de06b997917c6 Signed-off-by: Jesse Brandeburg Reviewed-by: Nelson, Shannon Reviewed-by: Williams, Mitch A Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 8 ++++---- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6234136fe052..f2e6afce8aae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1226,8 +1226,8 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) } dma_sync_single_range_for_device(rx_ring->dev, - bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change @@ -1542,8 +1542,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_record_rx_queue(skb, rx_ring->queue_index); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 3e692352f38d..69fb85ff346f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -700,8 +700,8 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) } dma_sync_single_range_for_device(rx_ring->dev, - bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change @@ -1007,8 +1007,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) skb_record_rx_queue(skb, rx_ring->queue_index); /* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - 0, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, rx_ring->rx_hdr_len, DMA_FROM_DEVICE); } -- cgit v1.2.3 From e9f6563d7bb3914711c6601cdf5d9f9b6e1b461f Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 4 Jan 2016 10:33:04 -0800 Subject: i40e: do TSO only if CHECKSUM_PARTIAL is set Don't bother trying to set up a TSO if the skb->ip_summed is not set to CHECKSUM_PARTIAL. 
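A stripped-down sketch of the guard (not the full i40e_tso() body; the helper name is invented): only GSO skbs whose checksum the hardware is still expected to fill in are worth building a TSO context for.

	#include <linux/skbuff.h>

	static int example_tso_check(struct sk_buff *skb)
	{
		/* nothing to offload unless the stack left the checksum to hardware */
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;

		/* not a segmentation-offload frame at all */
		if (!skb_is_gso(skb))
			return 0;

		/* the real routine goes on to build the TSO context descriptor */
		return 1;
	}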
Change-ID: I6495b3568e404907a2965b48cf3e2effa7c9ab55 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 +++ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f2e6afce8aae..15130b5dbe73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2221,6 +2221,9 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 l4len; int err; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 69fb85ff346f..4205aef2ffca 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1471,6 +1471,9 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u32 l4len; int err; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + if (!skb_is_gso(skb)) return 0; -- cgit v1.2.3 From 38c3cec73c7be6789a3e3ed06ff74ce84f5b1528 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:05 -0800 Subject: i40e: allocate memory safer The sync_vsi_filter function was allocating memory in such a way that it could sleep (GFP_KERNEL) which was causing a problem when called by the team driver under rcu_read_lock(), which cannot be held while sleeping. Found with lockdep. Change-ID: I4e59053cb5eedcf3d0ca151715be3dc42a94bdd5 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index c4ad0f159537..fe67b9097f35 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1937,7 +1937,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) sizeof(struct i40e_aqc_remove_macvlan_element_data); del_list_size = filter_list_len * sizeof(struct i40e_aqc_remove_macvlan_element_data); - del_list = kzalloc(del_list_size, GFP_KERNEL); + del_list = kzalloc(del_list_size, GFP_ATOMIC); if (!del_list) { i40e_cleanup_add_list(&tmp_add_list); @@ -2015,7 +2015,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) sizeof(struct i40e_aqc_add_macvlan_element_data), add_list_size = filter_list_len * sizeof(struct i40e_aqc_add_macvlan_element_data); - add_list = kzalloc(add_list_size, GFP_KERNEL); + add_list = kzalloc(add_list_size, GFP_ATOMIC); if (!add_list) { /* Purge element from temporary lists */ i40e_cleanup_add_list(&tmp_add_list); -- cgit v1.2.3 From c53934c6d1b11c676dafed04294cd906fc9ff5d3 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:06 -0800 Subject: i40e: fix: do not sleep in netdev_ops The driver was being called by VLAN, bonding, teaming operations that expected to be able to hold locks like rcu_read_lock(). This causes the driver to be held to the requirement to not sleep, and was found by the kernel debug options for checking sleep inside critical section, and the locking validator. 
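The shape of the fix (visible in the hunks that follow) is the usual answer to this constraint, and it pairs with the GFP_ATOMIC change in the previous patch: the atomic ndo callback only records what changed and kicks the driver's service task, where the sleeping work can run safely. A hedged sketch with invented structure and helper names:

	#include <linux/errno.h>
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/workqueue.h>

	struct example_priv {
		struct work_struct service_task;	/* runs in process context */
		bool filters_changed;
	};

	static int example_set_mac(struct net_device *netdev, void *p)
	{
		struct example_priv *priv = netdev_priv(netdev);
		struct sockaddr *addr = p;

		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;

		ether_addr_copy(netdev->dev_addr, addr->sa_data);

		/* may be called under rcu_read_lock() by vlan/bond/team, so
		 * nothing that can sleep may run here -- defer it instead
		 */
		priv->filters_changed = true;
		schedule_work(&priv->service_task);
		return 0;
	}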
Change-ID: Ibc68c835f5ffa8ffe0638ffe910a66fc5649a7f7 Signed-off-by: Jesse Brandeburg Reviewed-by: Nelson, Shannon Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index fe67b9097f35..ac897392ca71 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1542,7 +1542,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p) ether_addr_copy(netdev->dev_addr, addr->sa_data); - return i40e_sync_vsi_filters(vsi); + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); + return 0; } /** @@ -1766,6 +1770,11 @@ bottom_of_search_loop: vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; vsi->back->flags |= I40E_FLAG_FILTER_SYNC; } + + /* schedule our worker thread which will take care of + * applying the new filter changes + */ + i40e_service_event_schedule(vsi->back); } /** -- cgit v1.2.3 From 7bd6875bef7055170b2c871e6d44a0b5e826643f Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Mon, 4 Jan 2016 10:33:07 -0800 Subject: i40e: APIs to Add/remove port mirroring rules This patch implements necessary functions related to port mirroring features such as add/delete mirror rule, function to set promiscuous VLAN mode for VSI if mirror rule_type is "VLAN Mirroring". Change-ID: Iaf513fd5f188f99dcb977b48f99e73185dfddc40 Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 162 +++++++++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_prototype.h | 12 ++ 2 files changed, 174 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 9018f286e30c..976b03fe5b19 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2032,6 +2032,37 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, return status; } +/** + * i40e_aq_set_vsi_vlan_promisc - control the VLAN promiscuous setting + * @hw: pointer to the hw struct + * @seid: vsi number + * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN + * @cmd_details: pointer to command details structure or NULL + **/ +i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_vsi_promiscuous_modes *cmd = + (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw; + i40e_status status; + u16 flags = 0; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_vsi_promiscuous_modes); + if (enable) + flags |= I40E_AQC_SET_VSI_PROMISC_VLAN; + + cmd->promiscuous_flags = cpu_to_le16(flags); + cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_VLAN); + cmd->seid = cpu_to_le16(seid); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_get_vsi_params - get VSI configuration info * @hw: pointer to the hw struct @@ -2469,6 +2500,137 @@ i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 seid, return status; } +/** + * i40e_mirrorrule_op - Internal helper function to add/delete mirror rule + * @hw: pointer to the hw struct + * @opcode: AQ opcode for add or 
delete mirror rule + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @id: Destination VSI SEID or Rule ID + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add/Delete a mirror rule to a specific switch. Mirror rules are supported for + * VEBs/VEPA elements only + **/ +static i40e_status i40e_mirrorrule_op(struct i40e_hw *hw, + u16 opcode, u16 sw_seid, u16 rule_type, u16 id, + u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_add_delete_mirror_rule *cmd = + (struct i40e_aqc_add_delete_mirror_rule *)&desc.params.raw; + struct i40e_aqc_add_delete_mirror_rule_completion *resp = + (struct i40e_aqc_add_delete_mirror_rule_completion *)&desc.params.raw; + i40e_status status; + u16 buf_size; + + buf_size = count * sizeof(*mr_list); + + /* prep the rest of the request */ + i40e_fill_default_direct_cmd_desc(&desc, opcode); + cmd->seid = cpu_to_le16(sw_seid); + cmd->rule_type = cpu_to_le16(rule_type & + I40E_AQC_MIRROR_RULE_TYPE_MASK); + cmd->num_entries = cpu_to_le16(count); + /* Dest VSI for add, rule_id for delete */ + cmd->destination = cpu_to_le16(id); + if (mr_list) { + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | + I40E_AQ_FLAG_RD)); + if (buf_size > I40E_AQ_LARGE_BUF) + desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); + } + + status = i40e_asq_send_command(hw, &desc, mr_list, buf_size, + cmd_details); + if (!status || + hw->aq.asq_last_status == I40E_AQ_RC_ENOSPC) { + if (rule_id) + *rule_id = le16_to_cpu(resp->rule_id); + if (rules_used) + *rules_used = le16_to_cpu(resp->mirror_rules_used); + if (rules_free) + *rules_free = le16_to_cpu(resp->mirror_rules_free); + } + return status; +} + +/** + * i40e_aq_add_mirrorrule - add a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @dest_vsi: SEID of VSI to which packets will be mirrored + * @count: length of the list + * @mr_list: list of mirrored VSI SEIDs or VLAN IDs + * @cmd_details: pointer to command details structure or NULL + * @rule_id: Rule ID returned from FW + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Add mirror rule. Mirror rules are supported for VEBs or VEPA elements only + **/ +i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free) +{ + if (!(rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS || + rule_type == I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS)) { + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_add_mirror_rule, sw_seid, + rule_type, dest_vsi, count, mr_list, + cmd_details, rule_id, rules_used, rules_free); +} + +/** + * i40e_aq_delete_mirrorrule - delete a mirror rule + * @hw: pointer to the hw struct + * @sw_seid: Switch SEID (to which rule refers) + * @rule_type: Rule Type (ingress/egress/VLAN) + * @count: length of the list + * @rule_id: Rule ID that is returned in the receive desc as part of + * add_mirrorrule. 
+ * @mr_list: list of mirrored VLAN IDs to be removed + * @cmd_details: pointer to command details structure or NULL + * @rule_used: Number of rules used in internal switch + * @rule_free: Number of rules free in internal switch + * + * Delete a mirror rule. Mirror rules are supported for VEBs/VEPA elements only + **/ +i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free) +{ + /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ + if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { + if (!rule_id) + return I40E_ERR_PARAM; + } else { + /* count and mr_list shall be valid for rule_type INGRESS VLAN + * mirroring. For other rule_type, count and rule_type should + * not matter. + */ + if (count == 0 || !mr_list) + return I40E_ERR_PARAM; + } + + return i40e_mirrorrule_op(hw, i40e_aqc_opc_delete_mirror_rule, sw_seid, + rule_type, rule_id, count, mr_list, + cmd_details, NULL, rules_used, rules_free); +} + /** * i40e_aq_send_msg_to_vf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index bb9d583e5416..45af29b8f023 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -127,6 +127,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw, + u16 seid, bool enable, + struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw, struct i40e_vsi_context *vsi_ctx, struct i40e_asq_cmd_details *cmd_details); @@ -149,6 +152,15 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 vsi_id, i40e_status i40e_aq_remove_macvlan(struct i40e_hw *hw, u16 vsi_id, struct i40e_aqc_remove_macvlan_element_data *mv_list, u16 count, struct i40e_asq_cmd_details *cmd_details); +i40e_status i40e_aq_add_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 dest_vsi, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rule_id, u16 *rules_used, u16 *rules_free); +i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, + u16 rule_type, u16 rule_id, u16 count, __le16 *mr_list, + struct i40e_asq_cmd_details *cmd_details, + u16 *rules_used, u16 *rules_free); + i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, u8 *msg, u16 msglen, struct i40e_asq_cmd_details *cmd_details); -- cgit v1.2.3 From 2f0aff4151e13fff669b1fab4b93b7613b3b4be5 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Mon, 4 Jan 2016 10:33:08 -0800 Subject: i40e: negate PHY int mask bits The PHY interrupt mask bits mask out the events we don't want, so we need to negate the bitmask of events we want. 
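Put differently, a set bit in the PHY interrupt mask suppresses that event, so the value written must be the complement of the events the driver wants to see. A fragment mirroring the call sites in the hunks that follow (the local variable is only for clarity):

	u16 events_wanted = I40E_AQ_EVENT_LINK_UPDOWN |
			    I40E_AQ_EVENT_MODULE_QUAL_FAIL;

	/* mask off everything except the two wanted events */
	ret = i40e_aq_set_phy_int_mask(&pf->hw, ~events_wanted, NULL);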
Change-ID: I273244da5a8d285b6abc84fd68a90f1e6fa0393e Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ac897392ca71..e147db23325c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -6826,12 +6826,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) if (ret) goto end_core_reset; - /* driver is only interested in link up/down and module qualification - * reports from firmware + /* The driver only wants link up/down and module qualification + * reports from firmware. Note the negative logic. */ ret = i40e_aq_set_phy_int_mask(&pf->hw, - I40E_AQ_EVENT_LINK_UPDOWN | - I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (ret) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, ret), @@ -10961,12 +10961,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } } - /* driver is only interested in link up/down and module qualification - * reports from firmware + /* The driver only wants link up/down and module qualification + * reports from firmware. Note the negative logic. */ err = i40e_aq_set_phy_int_mask(&pf->hw, - I40E_AQ_EVENT_LINK_UPDOWN | - I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL); + ~(I40E_AQ_EVENT_LINK_UPDOWN | + I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL); if (err) dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n", i40e_stat_str(&pf->hw, err), -- cgit v1.2.3 From 426bda0fe47353c16893b689e01b9c8a4d668c2d Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:09 -0800 Subject: i40e: drop unused function Delete the unused irq_dynamic_disable function. 
Change-ID: Ia46071066babd121c7c90f141b6210b00078de3f Signed-off-by: Jesse Brandeburg Reviewed-by: Anjali Singhai Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 - drivers/net/ethernet/intel/i40e/i40e_main.c | 16 ---------------- 2 files changed, 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 175bec33a6b9..65cf3440f7cf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -757,7 +757,6 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) /* skip the flush */ } -void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); #ifdef I40E_FCOE diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e147db23325c..38fd94217e94 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3272,22 +3272,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) i40e_flush(hw); } -/** - * i40e_irq_dynamic_disable - Disable default interrupt generation settings - * @vsi: pointer to a vsi - * @vector: disable a particular Hw Interrupt vector - **/ -void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector) -{ - struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u32 val; - - val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT; - wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); - i40e_flush(hw); -} - /** * i40e_msix_clean_rings - MSIX mode Interrupt Handler * @irq: interrupt number -- cgit v1.2.3 From c40918c3ad80e550ec68339d6816059e11648012 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:10 -0800 Subject: i40e: count allocation errors Driver already counted allocation errors, so print them as part of the ethtool -S output. Useful for debugging if your system is having trouble making memory available for the driver. Change-ID: I83839fa86e81e6d80f03b917c88dd3ef9a64dde0 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 812c95493413..9dcdf3469988 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -89,6 +89,8 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), + I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), + I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; /* These PF_STATs might look like duplicates of some NETDEV_STATs, -- cgit v1.2.3 From 45d043597d2d0e780e768866c6fbfe8dbee5f2cf Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 4 Jan 2016 10:33:11 -0800 Subject: i40e: avoid large memcpy by assigning struct Assign the i40e_pf structure directly instead of using a large memcpy, which avoids a sparse warning and lets the compiler optimize the copy since it knows the size of the structure in advance. 
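The idiom is general: with a correctly typed destination pointer, a structure assignment performs the same copy as memcpy() while keeping the size visible to the compiler and to sparse. A stand-alone illustration (the structure is made up):

	#include <linux/types.h>

	struct big_state {
		u32 words[1024];
	};

	static void copy_state(struct big_state *dst, const struct big_state *src)
	{
		*dst = *src;	/* size implied by the type */
		/* memcpy(dst, src, sizeof(*dst)) is equivalent, but the large
		 * byte count is what drew the sparse warning
		 */
	}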
Change-ID: I17604e23be2616521eb760290befcb767b52b3f7 Signed-off-by: Jacob Keller Reviewed-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 7aae0561c9c5..3948587a047c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -185,9 +185,11 @@ static ssize_t i40e_dbg_dump_write(struct file *filp, if (i40e_dbg_prep_dump_buf(pf, buflen)) { p = i40e_dbg_dump_buf; - len = sizeof(struct i40e_pf); - memcpy(p, pf, len); - p += len; + /* avoid use of memcpy here due to sparse warning + * about copy size. + */ + *((struct i40e_pf *)p) = *pf; + p += sizeof(struct i40e_pf); len = (sizeof(struct i40e_aq_desc) * pf->hw.aq.num_asq_entries); -- cgit v1.2.3 From 4580de0de484a33814ccf0f4ae9c91dcd828a140 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Mon, 4 Jan 2016 10:33:12 -0800 Subject: i40e/i40evf: bump version to 1.4.12/1.4.8 Bump driver versions to i40e-1.4.12 and i40evf-1.4.8 Change-ID: I0ad82668c4ded04250391fda396ce191a42ab754 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 38fd94217e94..6e12626afb61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -51,7 +51,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 12 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index ae6c6e11af83..045cc7fb4623 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 7 +#define DRV_VERSION_BUILD 8 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From a340c7895a83501df68eb403496cec74bc363157 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Date: Wed, 6 Jan 2016 11:49:28 -0800 Subject: i40e: Enable Geneve offload for FW API ver > 1.4 for XL710/X710 devices This patch makes sure we check the GENEVE offload capable flag before we attempt offload. 
It also enables the Capability for XL710/X710 devices with FW API version higher than 1.4 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6e12626afb61..99d7a5c096b5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5354,7 +5354,8 @@ int i40e_open(struct net_device *netdev) vxlan_get_rx_port(netdev); #endif #ifdef CONFIG_I40E_GENEVE - geneve_get_rx_port(netdev); + if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE) + geneve_get_rx_port(netdev); #endif return 0; @@ -8458,7 +8459,13 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | I40E_FLAG_100M_SGMII_CAPABLE | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + } else if ((pf->hw.aq.api_maj_ver > 1) || + ((pf->hw.aq.api_maj_ver == 1) && + (pf->hw.aq.api_min_ver > 4))) { + /* Supported in FW API version higher than 1.4 */ + pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } + pf->eeprom_version = 0xDEAD; pf->lan_veb = I40E_NO_VEB; pf->lan_vsi = I40E_NO_VSI; @@ -8672,6 +8679,9 @@ static void i40e_add_geneve_port(struct net_device *netdev, u8 next_idx; u8 idx; + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + if (sa_family == AF_INET6) return; @@ -8719,6 +8729,9 @@ static void i40e_del_geneve_port(struct net_device *netdev, if (sa_family == AF_INET6) return; + if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) + return; + idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ -- cgit v1.2.3 From 72b748698076b4609186e9554a5a890cd435b172 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 8 Jan 2016 17:50:21 -0800 Subject: i40e: add priv flag for automatic rule eviction The X722 can support automatic rule eviction for automatically added flow director rules. Feature is (should be) disabled by default. 
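For reference, entries added to i40e_priv_flags_strings surface through ethtool's generic private-flags interface, so the feature is toggled from user space with ethtool --show-priv-flags / --set-priv-flags <dev> hw-atr-eviction on|off. A rough sketch of the read side of that plumbing, following the inverted sense used in the hunks below (the flag reads as on only while eviction has not been auto-disabled); this is a simplified paraphrase, not the driver function verbatim:

	static u32 example_get_priv_flags(struct net_device *dev)
	{
		struct i40e_netdev_priv *np = netdev_priv(dev);
		struct i40e_pf *pf = np->vsi->back;
		u32 flags = 0;

		if (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
			flags |= I40E_PRIV_FLAGS_HW_ATR_EVICT;

		return flags;
	}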
Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 9 +++++++++ drivers/net/ethernet/intel/i40e/i40e_main.c | 3 +++ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 ++++-- drivers/net/ethernet/intel/i40e/i40e_type.h | 4 ++++ 5 files changed, 21 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 65cf3440f7cf..05af33e1d810 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -104,6 +104,7 @@ #define I40E_PRIV_FLAGS_FD_ATR BIT(2) #define I40E_PRIV_FLAGS_VEB_STATS BIT(3) #define I40E_PRIV_FLAGS_PS BIT(4) +#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5) #define I40E_NVM_VERSION_LO_SHIFT 0 #define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9dcdf3469988..89ad2f749918 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -235,6 +235,7 @@ static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = { "flow-director-atr", "veb-stats", "packet-split", + "hw-atr-eviction", }; #define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings) @@ -2731,6 +2732,8 @@ static u32 i40e_get_priv_flags(struct net_device *dev) I40E_PRIV_FLAGS_VEB_STATS : 0; ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ? I40E_PRIV_FLAGS_PS : 0; + ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ? + 0 : I40E_PRIV_FLAGS_HW_ATR_EVICT; return ret_flags; } @@ -2787,6 +2790,12 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) else pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && + (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) + pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE; + else + pf->auto_disable_flags |= I40E_FLAG_HW_ATR_EVICT_CAPABLE; + /* if needed, issue reset to cause things to take effect */ if (reset_required) i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED)); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 99d7a5c096b5..f6d61cc7484a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8464,6 +8464,9 @@ static int i40e_sw_init(struct i40e_pf *pf) (pf->hw.aq.api_min_ver > 4))) { /* Supported in FW API version higher than 1.4 */ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; + } else { + pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE; } pf->eeprom_version = 0xDEAD; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 15130b5dbe73..6d1dd60c5c91 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2047,7 +2047,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Due to lack of space, no more new filters can be programmed */ if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) return; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) { + if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && + (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) { /* HW ATR eviction will take care of removing filters on FIN * and RST packets. 
*/ @@ -2109,7 +2110,8 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK; - if (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) + if ((pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE) && + (!(pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))) dtype_cmd |= I40E_TXD_FLTR_QW1_ATR_MASK; fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index dd2da356d9a1..b59a021b7a69 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1098,6 +1098,10 @@ enum i40e_filter_program_desc_pcmd { I40E_TXD_FLTR_QW1_CMD_SHIFT) #define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \ + I40E_TXD_FLTR_QW1_CMD_SHIFT) +#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT) + #define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 #define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) -- cgit v1.2.3 From ba94272d08a7e7e716f17e491dbc9068f88eb241 Mon Sep 17 00:00:00 2001 From: Sowmini Varadhan Date: Tue, 12 Jan 2016 19:32:31 -0800 Subject: i40e: use eth_platform_get_mac_address() This commit converts commit b499ffb0a22c ("i40e: Look up MAC address in Open Firmware or IDPROM") to use eth_platform_get_mac_address() added by commit c7f5d105495a ("net: Add eth_platform_get_mac_address() helper.") Signed-off-by: Sowmini Varadhan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f6d61cc7484a..04417e61b523 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -28,11 +28,6 @@ #include #include -#ifdef CONFIG_SPARC -#include -#include -#endif - /* Local includes */ #include "i40e.h" #include "i40e_diag.h" @@ -10618,21 +10613,9 @@ static void i40e_print_features(struct i40e_pf *pf) **/ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf) { - struct device_node *dp = pci_device_to_OF_node(pdev); - const unsigned char *addr; - u8 *mac_addr = pf->hw.mac.addr; - pf->flags &= ~I40E_FLAG_PF_MAC; - addr = of_get_mac_address(dp); - if (addr) { - ether_addr_copy(mac_addr, addr); + if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr)) pf->flags |= I40E_FLAG_PF_MAC; -#ifdef CONFIG_SPARC - } else { - ether_addr_copy(mac_addr, idprom->id_ethaddr); - pf->flags |= I40E_FLAG_PF_MAC; -#endif /* CONFIG_SPARC */ - } } /** -- cgit v1.2.3 From 7009212b157d3558bacb23bb037bdba75700e3fd Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Sun, 14 Feb 2016 14:10:39 +0000 Subject: macvlan: convert to use IFF_NO_QUEUE Use IFF_NO_QUEUE to indicate that a device can run without a qdisc. Signed-off-by: Zhang Shengju Signed-off-by: David S. 
Miller --- drivers/net/macvlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 94e688805dd2..426a2cc27ac8 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1069,7 +1069,7 @@ EXPORT_SYMBOL_GPL(macvlan_common_setup); static void macvlan_setup(struct net_device *dev) { macvlan_common_setup(dev); - dev->tx_queue_len = 0; + dev->priv_flags |= IFF_NO_QUEUE; } static int macvlan_port_create(struct net_device *dev) -- cgit v1.2.3 From b4eee84fcd8ecae88a7d6ab437f88284f9693d9c Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:48:08 -0600 Subject: amd-xgbe: Disable VLAN filtering when in promiscuous mode When the hardware is placed in promiscuous mode it will still perform VLAN filtering and therefore may not pass all packets to the driver. Disable all VLAN filtering when entering promiscuous mode and restore VLAN filtering upon exit from promiscuous mode. In order to avoid adding forward declarations, move the VLAN related functions earlier in the file. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 229 ++++++++++++++++--------------- 1 file changed, 117 insertions(+), 112 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index f6a7161e3b85..931e3745c451 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -702,6 +702,113 @@ static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata) return 0; } +static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + /* Put the VLAN tag in the Rx descriptor */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); + + /* Don't check the VLAN type */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); + + /* Check only C-TAG (0x8100) packets */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); + + /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); + + /* Enable VLAN tag stripping */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); + + return 0; +} + +static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) +{ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); + + return 0; +} + +static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Enable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); + + /* Enable VLAN Hash Table filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); + + /* Disable VLAN tag inverse matching */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); + + /* Only filter on the lower 12-bits of the VLAN tag */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); + + /* In order for the VLAN Hash Table filtering to be effective, + * the VLAN tag identifier in the VLAN Tag Register must not + * be zero. 
Set the VLAN tag identifier to "1" to enable the + * VLAN Hash Table filtering. This implies that a VLAN tag of + * 1 will always pass filtering. + */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); + + return 0; +} + +static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) +{ + /* Disable VLAN filtering */ + XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); + + return 0; +} + +static u32 xgbe_vid_crc32_le(__le16 vid_le) +{ + u32 poly = 0xedb88320; /* CRCPOLY_LE */ + u32 crc = ~0; + u32 temp = 0; + unsigned char *data = (unsigned char *)&vid_le; + unsigned char data_byte = 0; + int i, bits; + + bits = get_bitmask_order(VLAN_VID_MASK); + for (i = 0; i < bits; i++) { + if ((i % 8) == 0) + data_byte = data[i / 8]; + + temp = ((crc & 1) ^ data_byte) & 1; + crc >>= 1; + data_byte >>= 1; + + if (temp) + crc ^= poly; + } + + return crc; +} + +static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) +{ + u32 crc; + u16 vid; + __le16 vid_le; + u16 vlan_hash_table = 0; + + /* Generate the VLAN Hash Table value */ + for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { + /* Get the CRC32 value of the VLAN ID */ + vid_le = cpu_to_le16(vid); + crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; + + vlan_hash_table |= (1 << crc); + } + + /* Set the VLAN Hash Table filtering register */ + XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); + + return 0; +} + static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable) { @@ -714,6 +821,14 @@ static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, enable ? "entering" : "leaving"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val); + /* Hardware will still perform VLAN filtering in promiscuous mode */ + if (enable) { + xgbe_disable_rx_vlan_filtering(pdata); + } else { + if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER) + xgbe_enable_rx_vlan_filtering(pdata); + } + return 0; } @@ -944,116 +1059,6 @@ static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata) return 0; } -static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata) -{ - /* Put the VLAN tag in the Rx descriptor */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1); - - /* Don't check the VLAN type */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1); - - /* Check only C-TAG (0x8100) packets */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0); - - /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0); - - /* Enable VLAN tag stripping */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3); - - return 0; -} - -static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata) -{ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0); - - return 0; -} - -static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata) -{ - /* Enable VLAN filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); - - /* Enable VLAN Hash Table filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1); - - /* Disable VLAN tag inverse matching */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0); - - /* Only filter on the lower 12-bits of the VLAN tag */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1); - - /* In order for the VLAN Hash Table filtering to be effective, - * the VLAN tag identifier in the VLAN Tag Register must not - * be zero. Set the VLAN tag identifier to "1" to enable the - * VLAN Hash Table filtering. This implies that a VLAN tag of - * 1 will always pass filtering. 
- */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1); - - return 0; -} - -static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata) -{ - /* Disable VLAN filtering */ - XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); - - return 0; -} - -#ifndef CRCPOLY_LE -#define CRCPOLY_LE 0xedb88320 -#endif -static u32 xgbe_vid_crc32_le(__le16 vid_le) -{ - u32 poly = CRCPOLY_LE; - u32 crc = ~0; - u32 temp = 0; - unsigned char *data = (unsigned char *)&vid_le; - unsigned char data_byte = 0; - int i, bits; - - bits = get_bitmask_order(VLAN_VID_MASK); - for (i = 0; i < bits; i++) { - if ((i % 8) == 0) - data_byte = data[i / 8]; - - temp = ((crc & 1) ^ data_byte) & 1; - crc >>= 1; - data_byte >>= 1; - - if (temp) - crc ^= poly; - } - - return crc; -} - -static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata) -{ - u32 crc; - u16 vid; - __le16 vid_le; - u16 vlan_hash_table = 0; - - /* Generate the VLAN Hash Table value */ - for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) { - /* Get the CRC32 value of the VLAN ID */ - vid_le = cpu_to_le16(vid); - crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28; - - vlan_hash_table |= (1 << crc); - } - - /* Set the VLAN Hash Table filtering register */ - XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table); - - return 0; -} - static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata) { struct xgbe_ring_desc *rdesc = rdata->rdesc; -- cgit v1.2.3 From 491aefb38a31bb163e1e4393ba42abf68ce4c6eb Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:48:19 -0600 Subject: amd-xgbe: Change from napi_complete to napi_complete_done Change from using napi_complete to napi_complete_done to allow for the use of gro_flush_timeout in tuning network processing. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index cfd3f7efda1c..5a4d9ca369d7 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -2068,7 +2068,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget) /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ - napi_complete(napi); + napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ enable_irq(channel->dma_irq); @@ -2110,7 +2110,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget) /* If we processed everything, we are done */ if (processed < budget) { /* Turn off polling */ - napi_complete(napi); + napi_complete_done(napi, processed); /* Enable Tx and Rx interrupts */ xgbe_enable_rx_tx_ints(pdata); -- cgit v1.2.3 From 793494228fdf15acb252063f9f6dcccd3d1d497e Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:48:29 -0600 Subject: amd-xgbe: Use __napi_schedule_irqoff Change from calling __napi_schedule to __napi_schedule_irqoff when running in interrupt context or when called by netpoll with interrupts already disabled. The Tx timer function will continue to use __napi_schedule. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 5a4d9ca369d7..80ef4041e7a3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -356,7 +356,7 @@ static irqreturn_t xgbe_isr(int irq, void *data) xgbe_disable_rx_tx_ints(pdata); /* Turn on polling */ - __napi_schedule(&pdata->napi); + __napi_schedule_irqoff(&pdata->napi); } } @@ -409,7 +409,7 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data) disable_irq_nosync(channel->dma_irq); /* Turn on polling */ - __napi_schedule(&channel->napi); + __napi_schedule_irqoff(&channel->napi); } return IRQ_HANDLED; -- cgit v1.2.3 From 757e6aa34ca9529786877d3045684e7c9f686075 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:48:39 -0600 Subject: amd-xgbe: Verify forced speed matches the active speedset When using ethtool to set the speed for the device, verify that the specified speed is valid within the active speedset. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 6040293db9c1..11d9f0c5b78b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -318,8 +318,20 @@ static int xgbe_set_settings(struct net_device *netdev, if (cmd->autoneg == AUTONEG_DISABLE) { switch (speed) { case SPEED_10000: + break; case SPEED_2500: + if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) { + netdev_err(netdev, "unsupported speed %u\n", + speed); + return -EINVAL; + } + break; case SPEED_1000: + if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) { + netdev_err(netdev, "unsupported speed %u\n", + speed); + return -EINVAL; + } break; default: netdev_err(netdev, "unsupported speed %u\n", speed); -- cgit v1.2.3 From 8dba2a2a88397dec6bdcae8bf7ceeefd62fd39fc Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:48:48 -0600 Subject: amd-xgbe: Enable/disable PFC per traffic class Currently the PFC flow control is enabled on all traffic classes if one or more traffic classes request it. The PFC enable setting of the traffic class should be used to determine whether to enable or disable flow control for the traffic class. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 36 ++++++++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 931e3745c451..6b98a99fbfa4 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -518,13 +518,45 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { + struct ieee_pfc *pfc = pdata->pfc; + struct ieee_ets *ets = pdata->ets; unsigned int max_q_count, q_count; unsigned int reg, reg_val; unsigned int i; /* Set MTL flow control */ - for (i = 0; i < pdata->rx_q_count; i++) - XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1); + for (i = 0; i < pdata->rx_q_count; i++) { + unsigned int ehfc = 0; + + if (pfc && ets) { + unsigned int prio; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + unsigned int tc; + + /* Does this queue handle the priority? */ + if (pdata->prio2q_map[prio] != i) + continue; + + /* Get the Traffic Class for this priority */ + tc = ets->prio_tc[prio]; + + /* Check if flow control should be enabled */ + if (pfc->pfc_en & (1 << tc)) { + ehfc = 1; + break; + } + } + } else { + ehfc = 1; + } + + XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc); + + netif_dbg(pdata, drv, pdata->netdev, + "flow control %s for RXq%u\n", + ehfc ? "enabled" : "disabled", i); + } /* Set MAC flow control */ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; -- cgit v1.2.3 From 6a49ee4e1b9eb8747ff79c5438a42c0726aaa03b Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:48:57 -0600 Subject: amd-xgbe: Fix the mapping of priorities to traffic classes The driver is checking the pfc_en field of the ieee_pfc structure to determine whether to associate a priority with a traffic class. This is incorrect since the pfc_en field is for determining if PFC is enabled for a traffic class. The association of priority to traffic class does not depend on whether the traffic class is enabled for PFC, so remove that check. Also, the mapping of priorities to traffic classes should be done when configuring the traffic classes and not the PFC support so move the priority to traffic class association from xgbe_config_dcb_pfc to xgbe_config_dcb_tc. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 50 ++++++++++++++------------------ 1 file changed, 21 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 6b98a99fbfa4..67d234eb1655 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1329,7 +1329,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) { struct ieee_ets *ets = pdata->ets; unsigned int total_weight, min_weight, weight; - unsigned int i; + unsigned int mask, reg, reg_val; + unsigned int i, prio; if (!ets) return; @@ -1346,6 +1347,25 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) min_weight = 1; for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { + /* Map the priorities to the traffic class */ + mask = 0; + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { + if (ets->prio_tc[prio] == i) + mask |= (1 << prio); + } + mask &= 0xff; + + netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n", + i, mask); + reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG)); + reg_val = XGMAC_IOREAD(pdata, reg); + + reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3)); + reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3)); + + XGMAC_IOWRITE(pdata, reg, reg_val); + + /* Set the traffic class algorithm */ switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: netif_dbg(pdata, drv, pdata->netdev, @@ -1370,34 +1390,6 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) { - struct ieee_pfc *pfc = pdata->pfc; - struct ieee_ets *ets = pdata->ets; - unsigned int mask, reg, reg_val; - unsigned int tc, prio; - - if (!pfc || !ets) - return; - - for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) { - mask = 0; - for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) { - if ((pfc->pfc_en & (1 << prio)) && - (ets->prio_tc[prio] == tc)) - mask |= (1 << prio); - } - mask &= 0xff; - - netif_dbg(pdata, drv, pdata->netdev, "TC%u PFC mask=%#x\n", - tc, mask); - reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG)); - reg_val = XGMAC_IOREAD(pdata, reg); - - reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3)); - reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3)); - - XGMAC_IOWRITE(pdata, reg, reg_val); - } - xgbe_config_flow_control(pdata); } -- cgit v1.2.3 From b3b715974bfe69f626d6a633b8c96590de1b7338 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:49:08 -0600 Subject: amd-xgbe: Do traffic class setup when called through dcbnl Currently the netdev traffic class setup is only performed when invoked through the ndo_setup_tc interface. However, the same setup should be performed when the dcbnl interface (ieee_setets) is invoked. Rework the netdev traffic class setup to be invokable through either interface and also provide the priority to traffic class mapping if available. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 39 ++++++++++++++++++++++++-------- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 33 +++++++++++++++++++++++++++ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 22 ++++-------------- drivers/net/ethernet/amd/xgbe/xgbe.h | 6 +++-- 4 files changed, 70 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c index a6b9899e285f..895d35639129 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -146,6 +146,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, { struct xgbe_prv_data *pdata = netdev_priv(netdev); unsigned int i, tc_ets, tc_ets_weight; + u8 max_tc = 0; tc_ets = 0; tc_ets_weight = 0; @@ -157,12 +158,9 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]); - if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && - (i >= pdata->hw_feat.tc_cnt)) - return -EINVAL; - - if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt) - return -EINVAL; + max_tc = max_t(u8, max_tc, ets->prio_tc[i]); + if ((ets->tc_tx_bw[i] || ets->tc_tsa[i])) + max_tc = max_t(u8, max_tc, i); switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: @@ -171,15 +169,28 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, tc_ets = 1; tc_ets_weight += ets->tc_tx_bw[i]; break; - default: + netif_err(pdata, drv, netdev, + "unsupported TSA algorithm (%hhu)\n", + ets->tc_tsa[i]); return -EINVAL; } } + /* Check maximum traffic class requested */ + if (max_tc >= pdata->hw_feat.tc_cnt) { + netif_err(pdata, drv, netdev, + "exceeded number of supported traffic classes\n"); + return -EINVAL; + } + /* Weights must add up to 100% */ - if (tc_ets && (tc_ets_weight != 100)) + if (tc_ets && (tc_ets_weight != 100)) { + netif_err(pdata, drv, netdev, + "sum of ETS algorithm weights is not 100 (%u)\n", + tc_ets_weight); return -EINVAL; + } if (!pdata->ets) { pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets), @@ -188,6 +199,7 @@ static int xgbe_dcb_ieee_setets(struct net_device *netdev, return -ENOMEM; } + pdata->num_tcs = max_tc + 1; memcpy(pdata->ets, ets, sizeof(*pdata->ets)); pdata->hw_if.config_dcb_tc(pdata); @@ -221,6 +233,13 @@ static int xgbe_dcb_ieee_setpfc(struct net_device *netdev, "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n", pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay); + /* Check PFC for supported number of traffic classes */ + if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) { + netif_err(pdata, drv, netdev, + "PFC requested for unsupported traffic class\n"); + return -EINVAL; + } + if (!pdata->pfc) { pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc), GFP_KERNEL); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 67d234eb1655..43273c9823aa 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1325,6 
+1325,36 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata, return 0; } +static void xgbe_config_tc(struct xgbe_prv_data *pdata) +{ + unsigned int offset, queue, prio; + u8 i; + + netdev_reset_tc(pdata->netdev); + if (!pdata->num_tcs) + return; + + netdev_set_num_tc(pdata->netdev, pdata->num_tcs); + + for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) { + while ((queue < pdata->tx_q_count) && + (pdata->q2tc_map[queue] == i)) + queue++; + + netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n", + i, offset, queue - 1); + netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset); + offset = queue; + } + + if (!pdata->ets) + return; + + for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) + netdev_set_prio_tc_map(pdata->netdev, prio, + pdata->ets->prio_tc[prio]); +} + static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) { struct ieee_ets *ets = pdata->ets; @@ -1386,6 +1416,8 @@ static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata) break; } } + + xgbe_config_tc(pdata); } static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata) @@ -2910,6 +2942,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if) hw_if->get_tx_tstamp = xgbe_get_tx_tstamp; /* For Data Center Bridging config */ + hw_if->config_tc = xgbe_config_tc; hw_if->config_dcb_tc = xgbe_config_dcb_tc; hw_if->config_dcb_pfc = xgbe_config_dcb_pfc; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 80ef4041e7a3..33606840ae15 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1630,32 +1630,18 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct tc_to_netdev *tc_to_netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - unsigned int offset, queue; - u8 i, tc; + u8 tc; if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO) return -EINVAL; tc = tc_to_netdev->tc; - if (tc && (tc != pdata->hw_feat.tc_cnt)) + if (tc > pdata->hw_feat.tc_cnt) return -EINVAL; - if (tc) { - netdev_set_num_tc(netdev, tc); - for (i = 0, queue = 0, offset = 0; i < tc; i++) { - while ((queue < pdata->tx_q_count) && - (pdata->q2tc_map[queue] == i)) - queue++; - - netif_dbg(pdata, drv, netdev, "TC%u using TXq%u-%u\n", - i, offset, queue - 1); - netdev_set_tc_queue(netdev, i, queue - offset, offset); - offset = queue; - } - } else { - netdev_reset_tc(netdev); - } + pdata->num_tcs = tc; + pdata->hw_if.config_tc(pdata); return 0; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index e234b9970318..ca2835485450 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -673,6 +673,7 @@ struct xgbe_hw_if { u64 (*get_tx_tstamp)(struct xgbe_prv_data *); /* For Data Center Bridging config */ + void (*config_tc)(struct xgbe_prv_data *); void (*config_dcb_tc)(struct xgbe_prv_data *); void (*config_dcb_pfc)(struct xgbe_prv_data *); @@ -880,6 +881,7 @@ struct xgbe_prv_data { struct ieee_pfc *pfc; unsigned int q2tc_map[XGBE_MAX_QUEUES]; unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS]; + u8 num_tcs; /* Hardware features of the device */ struct xgbe_hw_features hw_feat; -- cgit v1.2.3 From c3727d61ea73a1df60bacd0b416e01282a94240b Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:49:16 -0600 Subject: amd-xgbe: Check Rx queue fifos before stopping Rx DMA Check to be sure that the Rx queue fifos are empty before stopping the Rx DMA channels. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 10 +++++++--- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 30 +++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index b6fa89102526..bbef95973c27 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -768,12 +768,16 @@ #define MTL_Q_TQDR 0x08 #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 -#define MTL_Q_RQDR 0x4c +#define MTL_Q_RQDR 0x48 #define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ +#define MTL_Q_RQDR_PRXQ_INDEX 16 +#define MTL_Q_RQDR_PRXQ_WIDTH 14 +#define MTL_Q_RQDR_RXQSTS_INDEX 4 +#define MTL_Q_RQDR_RXQSTS_WIDTH 2 #define MTL_Q_RQFCR_RFA_INDEX 1 #define MTL_Q_RQFCR_RFA_WIDTH 6 #define MTL_Q_RQFCR_RFD_INDEX 17 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 43273c9823aa..b48c6eca3e6b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -2656,6 +2656,32 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) } } +static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, + unsigned int queue) +{ + unsigned int rx_status; + unsigned long rx_timeout; + + /* The Rx engine cannot be stopped if it is actively processing + * packets. Wait for the Rx queue to empty the Rx fifo. Don't + * wait forever though... 
+ */ + rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ); + while (time_before(jiffies, rx_timeout)) { + rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR); + if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) && + (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0)) + break; + + usleep_range(500, 1000); + } + + if (!time_before(jiffies, rx_timeout)) + netdev_info(pdata->netdev, + "timed out waiting for Rx queue %u to empty\n", + queue); +} + static void xgbe_enable_rx(struct xgbe_prv_data *pdata) { struct xgbe_channel *channel; @@ -2694,6 +2720,10 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata) XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0); + /* Prepare for Rx DMA channel stop */ + for (i = 0; i < pdata->rx_q_count; i++) + xgbe_prepare_rx_stop(pdata, i); + /* Disable each Rx queue */ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); -- cgit v1.2.3 From ced3fcae693b563b20ee8d2dba966760e6b771d4 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 17 Feb 2016 11:49:28 -0600 Subject: amd-xgbe: Mask auto-negotiation interrupts in ISR Currently the auto-negotiation interrupt handling disables the irq instead of masking off the interrupts. This was done because the phy library was originally used to read and write the PCS registers, which could not be performed in interrupt context. Now that the phy library is no longer used to read and write the PCS registers the interrupts can be masked off in the interrupt service routine eliminating the need to call disable_irq/enable_irq. This also requires changing the protection mutex to a spinlock. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 10 +++--- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 6 ++-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 53 +++++++++++++++++-------------- drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +-- 4 files changed, 41 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index b48c6eca3e6b..1babcc11a248 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -1022,6 +1022,7 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata) static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg) { + unsigned long flags; unsigned int mmd_address; int mmd_data; @@ -1039,10 +1040,10 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. */ - mutex_lock(&pdata->xpcs_mutex); + spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2); - mutex_unlock(&pdata->xpcs_mutex); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); return mmd_data; } @@ -1051,6 +1052,7 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg, int mmd_data) { unsigned int mmd_address; + unsigned long flags; if (mmd_reg & MII_ADDR_C45) mmd_address = mmd_reg & ~MII_ADDR_C45; @@ -1066,10 +1068,10 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, * register offsets must therefore be adjusted by left shifting the * offset 2 bits and reading 32 bits of data. 
*/ - mutex_lock(&pdata->xpcs_mutex); + spin_lock_irqsave(&pdata->xpcs_lock, flags); XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8); XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data); - mutex_unlock(&pdata->xpcs_mutex); + spin_unlock_irqrestore(&pdata->xpcs_lock, flags); } static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 618d952c2984..3eee3201b58f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -363,7 +363,7 @@ static int xgbe_probe(struct platform_device *pdev) platform_set_drvdata(pdev, netdev); spin_lock_init(&pdata->lock); - mutex_init(&pdata->xpcs_mutex); + spin_lock_init(&pdata->xpcs_lock); mutex_init(&pdata->rss_mutex); spin_lock_init(&pdata->tstamp_lock); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 446058081866..84c5d296d13e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -6,7 +6,7 @@ * * License 1: GPLv2 * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by @@ -56,7 +56,7 @@ * * License 2: Modified BSD * - * Copyright (c) 2014 Advanced Micro Devices, Inc. + * Copyright (c) 2014-2016 Advanced Micro Devices, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -626,10 +626,22 @@ static irqreturn_t xgbe_an_isr(int irq, void *data) netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n"); - /* Interrupt reason must be read and cleared outside of IRQ context */ - disable_irq_nosync(pdata->an_irq); + /* Disable AN interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); + + /* Save the interrupt(s) that fired */ + pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); - queue_work(pdata->an_workqueue, &pdata->an_irq_work); + if (pdata->an_int) { + /* Clear the interrupt(s) that fired and process them */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); + + queue_work(pdata->an_workqueue, &pdata->an_irq_work); + } else { + /* Enable AN interrupts */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, + XGBE_AN_INT_MASK); + } return IRQ_HANDLED; } @@ -673,34 +685,26 @@ static void xgbe_an_state_machine(struct work_struct *work) struct xgbe_prv_data, an_work); enum xgbe_an cur_state = pdata->an_state; - unsigned int int_reg, int_mask; mutex_lock(&pdata->an_mutex); - /* Read the interrupt */ - int_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); - if (!int_reg) + if (!pdata->an_int) goto out; next_int: - if (int_reg & XGBE_AN_PG_RCV) { + if (pdata->an_int & XGBE_AN_PG_RCV) { pdata->an_state = XGBE_AN_PAGE_RECEIVED; - int_mask = XGBE_AN_PG_RCV; - } else if (int_reg & XGBE_AN_INC_LINK) { + pdata->an_int &= ~XGBE_AN_PG_RCV; + } else if (pdata->an_int & XGBE_AN_INC_LINK) { pdata->an_state = XGBE_AN_INCOMPAT_LINK; - int_mask = XGBE_AN_INC_LINK; - } else if (int_reg & XGBE_AN_INT_CMPLT) { + pdata->an_int &= ~XGBE_AN_INC_LINK; + } else if (pdata->an_int & XGBE_AN_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; - int_mask = XGBE_AN_INT_CMPLT; + pdata->an_int &= ~XGBE_AN_INT_CMPLT; } else { pdata->an_state = XGBE_AN_ERROR; - int_mask = 0; } - /* Clear the interrupt to be processed */ - int_reg &= ~int_mask; - XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, int_reg); - pdata->an_result = pdata->an_state; again: @@ -740,14 +744,14 @@ again: } if (pdata->an_state == XGBE_AN_NO_LINK) { - int_reg = 0; + pdata->an_int = 0; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } else if (pdata->an_state == XGBE_AN_ERROR) { netdev_err(pdata->netdev, "error during auto-negotiation, state=%u\n", cur_state); - int_reg = 0; + pdata->an_int = 0; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } @@ -765,11 +769,12 @@ again: if (cur_state != pdata->an_state) goto again; - if (int_reg) + if (pdata->an_int) goto next_int; out: - enable_irq(pdata->an_irq); + /* Enable AN interrupts on the way out */ + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK); mutex_unlock(&pdata->an_mutex); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ca2835485450..98d9d63c4353 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -774,8 +774,8 @@ struct xgbe_prv_data { /* Overall device lock */ spinlock_t lock; - /* XPCS indirect addressing mutex */ - struct mutex xpcs_mutex; + /* XPCS indirect addressing lock */ + spinlock_t xpcs_lock; /* RSS addressing mutex */ struct mutex rss_mutex; @@ -927,6 +927,7 @@ struct xgbe_prv_data { u32 serdes_dfe_tap_ena[XGBE_SPEEDS]; /* Auto-negotiation state machine support */ + unsigned int an_int; struct mutex an_mutex; enum xgbe_an an_result; enum xgbe_an an_state; -- cgit v1.2.3 From bb64b035f8a8474c79c3554b921c136558bc18c5 Mon Sep 17 00:00:00 2001 
From: Sergei Shtylyov Date: Sun, 14 Feb 2016 22:56:03 +0300 Subject: ravb: kill useless *switch* defaults The driver has the *default* case doing nothing in the *switch* statement with an integer expression -- remove it. Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index c936682aae68..331c5969dca4 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -92,8 +92,6 @@ static void ravb_set_rate(struct net_device *ndev) case 1000: /* 1000BASE */ ravb_write(ndev, GECMR_SPEED_1000, GECMR); break; - default: - break; } } -- cgit v1.2.3 From 82c133093c56c2b11b7a4cf9c18f494b7b7c0191 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 14 Feb 2016 22:56:33 +0300 Subject: sh_eth: kill useless *switch* defaults The driver often has the *default* cases doing nothing in the *switch* statements with the integer expressions -- remove them. Signed-off-by: Sergei Shtylyov Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 0a150b228914..a2767336b7c5 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -500,8 +500,6 @@ static void sh_eth_set_rate_gether(struct net_device *ndev) case 1000: /* 1000BASE */ sh_eth_write(ndev, GECMR_1000, GECMR); break; - default: - break; } } @@ -592,8 +590,6 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev) case 100:/* 100BASE */ sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB); break; - default: - break; } } @@ -658,8 +654,6 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev) case 100:/* 100BASE */ sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM); break; - default: - break; } } @@ -698,8 +692,6 @@ static void sh_eth_set_rate_sh7757(struct net_device *ndev) case 100:/* 100BASE */ sh_eth_write(ndev, 1, RTRATE); break; - default: - break; } } @@ -767,8 +759,6 @@ static void sh_eth_set_rate_giga(struct net_device *ndev) case 1000: /* 1000BASE */ sh_eth_write(ndev, 0x00000020, GECMR); break; - default: - break; } } @@ -2917,8 +2907,6 @@ static const u16 *sh_eth_get_register_offset(int register_type) case SH_ETH_REG_FAST_SH3_SH2: reg_offset = sh_eth_offset_fast_sh3_sh2; break; - default: - break; } return reg_offset; -- cgit v1.2.3 From fc48b7a6148af974b49db145812a8b060324a503 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Mon, 15 Feb 2016 13:22:35 -0500 Subject: qed/qede: use 8.7.3.0 FW. This patch moves the qed* driver into utilizing the 8.7.3.0 FW. This new FW is required for a lot of new SW features, including: - Vlan filtering offload - Encapsulation offload support - HW ingress aggregations As well as paving the way for the possibility of adding storage protocols in the future. V2: - Fix kbuild test robot error/warnings. Signed-off-by: Yuval Mintz Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 43 +- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 3 +- drivers/net/ethernet/qlogic/qed/qed_dev.c | 88 +- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2690 +++++++++----------- .../net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 22 +- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 155 +- drivers/net/ethernet/qlogic/qed/qed_l2.c | 13 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 37 +- drivers/net/ethernet/qlogic/qed/qed_sp.h | 2 +- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 17 +- drivers/net/ethernet/qlogic/qede/qede.h | 8 +- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 6 +- drivers/net/ethernet/qlogic/qede/qede_main.c | 264 +- include/linux/qed/common_hsi.h | 36 +- include/linux/qed/eth_common.h | 171 +- include/linux/qed/qed_if.h | 8 +- 17 files changed, 1795 insertions(+), 1770 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 1292c360390c..d34da638b5d5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -70,8 +70,8 @@ struct qed_sb_sp_info; struct qed_mcp_info; struct qed_rt_data { - u32 init_val; - bool b_valid; + u32 *init_val; + bool *b_valid; }; /* The PCI personality is not quite synonymous to protocol ID: @@ -120,6 +120,10 @@ enum QED_PORT_MODE { QED_PORT_MODE_DE_1X25G }; +enum qed_dev_cap { + QED_DEV_CAP_ETH, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -151,6 +155,7 @@ struct qed_hw_info { u32 port_mode; u32 hw_mode; + unsigned long device_capabilities; }; struct qed_hw_cid_data { @@ -267,7 +272,7 @@ struct qed_hwfn { struct qed_hw_info hw_info; /* rt_array (for init-tool) */ - struct qed_rt_data *rt_data; + struct qed_rt_data rt_data; /* SPQ */ struct qed_spq *p_spq; @@ -350,9 +355,20 @@ struct qed_dev { char name[NAME_SIZE]; u8 type; -#define QED_DEV_TYPE_BB_A0 (0 << 0) -#define QED_DEV_TYPE_MASK (0x3) -#define QED_DEV_TYPE_SHIFT (0) +#define QED_DEV_TYPE_BB (0 << 0) +#define QED_DEV_TYPE_AH BIT(0) +/* Translate type/revision combo into the proper conditions */ +#define QED_IS_BB(dev) ((dev)->type == QED_DEV_TYPE_BB) +#define QED_IS_BB_A0(dev) (QED_IS_BB(dev) && \ + CHIP_REV_IS_A0(dev)) +#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && \ + CHIP_REV_IS_B0(dev)) + +#define QED_GET_TYPE(dev) (QED_IS_BB_A0(dev) ? CHIP_BB_A0 : \ + QED_IS_BB_B0(dev) ? 
CHIP_BB_B0 : CHIP_K2) + + u16 vendor_id; + u16 device_id; u16 chip_num; #define CHIP_NUM_MASK 0xffff @@ -361,6 +377,8 @@ struct qed_dev { u16 chip_rev; #define CHIP_REV_MASK 0xf #define CHIP_REV_SHIFT 12 +#define CHIP_REV_IS_A0(_cdev) (!(_cdev)->chip_rev) +#define CHIP_REV_IS_B0(_cdev) ((_cdev)->chip_rev == 1) u16 chip_metal; #define CHIP_METAL_MASK 0xff @@ -375,10 +393,10 @@ struct qed_dev { u8 num_funcs_in_port; u8 path_id; - enum mf_mode mf_mode; -#define IS_MF(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode != SF) -#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_NPAR) -#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == MF_OVLAN) + enum qed_mf_mode mf_mode; +#define IS_MF_DEFAULT(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_DEFAULT) +#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_NPAR) +#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->cdev)->mf_mode == QED_MF_OVLAN) int pcie_width; int pcie_speed; @@ -441,11 +459,6 @@ struct qed_dev { const struct firmware *firmware; }; -#define QED_GET_TYPE(dev) (((dev)->type & QED_DEV_TYPE_MASK) >> \ - QED_DEV_TYPE_SHIFT) -#define QED_IS_BB_A0(dev) (QED_GET_TYPE(dev) == QED_DEV_TYPE_BB_A0) -#define QED_IS_BB(dev) (QED_IS_BB_A0(dev)) - #define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB #define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index 7ccdb46c6764..d3f7a0215e7e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -581,7 +581,8 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn) params.num_pf_cids = iids.cids; params.start_pq = qm_info->start_pq; params.num_pf_pqs = qm_info->num_pqs; - params.start_vport = qm_info->num_vports; + params.start_vport = qm_info->start_vport; + params.num_vports = qm_info->num_vports; params.pf_wfq = qm_info->pf_wfq; params.pf_rl = qm_info->pf_rl; params.pq_params = qm_info->qm_pq_params; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 817bbd5476ff..bc17ed2c9cac 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -341,11 +341,6 @@ void qed_resc_setup(struct qed_dev *cdev) } } -#define FINAL_CLEANUP_CMD_OFFSET (0) -#define FINAL_CLEANUP_CMD (0x1) -#define FINAL_CLEANUP_VALID_OFFSET (6) -#define FINAL_CLEANUP_VFPF_ID_SHIFT (7) -#define FINAL_CLEANUP_COMP (0x2) #define FINAL_CLEANUP_POLL_CNT (100) #define FINAL_CLEANUP_POLL_TIME (10) int qed_final_cleanup(struct qed_hwfn *p_hwfn, @@ -355,12 +350,14 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn, u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; int rc = -EBUSY; - addr = GTT_BAR0_MAP_REG_USDM_RAM + USTORM_FLR_FINAL_ACK_OFFSET; + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); - command |= FINAL_CLEANUP_CMD << FINAL_CLEANUP_CMD_OFFSET; - command |= 1 << FINAL_CLEANUP_VALID_OFFSET; - command |= id << FINAL_CLEANUP_VFPF_ID_SHIFT; - command |= FINAL_CLEANUP_COMP << SDM_OP_GEN_COMP_TYPE_SHIFT; + command |= X_FINAL_CLEANUP_AGG_INT << + SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; + command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; + command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; + command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; /* Make sure notification is not set before initiating final cleanup */ if (REG_RD(p_hwfn, addr)) { @@ -415,18 +412,16 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) } switch (p_hwfn->cdev->mf_mode) { 
- case SF: - hw_mode |= 1 << MODE_SF; + case QED_MF_DEFAULT: + case QED_MF_NPAR: + hw_mode |= 1 << MODE_MF_SI; break; - case MF_OVLAN: + case QED_MF_OVLAN: hw_mode |= 1 << MODE_MF_SD; break; - case MF_NPAR: - hw_mode |= 1 << MODE_MF_SI; - break; default: - DP_NOTICE(p_hwfn, "Unsupported MF mode, init as SF\n"); - hw_mode |= 1 << MODE_SF; + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + hw_mode |= 1 << MODE_MF_SI; } hw_mode |= 1 << MODE_ASIC; @@ -1018,8 +1013,7 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) u32 *resc_num = p_hwfn->hw_info.resc_num; int num_funcs, i; - num_funcs = IS_MF(p_hwfn) ? MAX_NUM_PFS_BB - : p_hwfn->cdev->num_ports_in_engines; + num_funcs = MAX_NUM_PFS_BB; resc_num[QED_SB] = min_t(u32, (MAX_SB_PER_PATH_BB / num_funcs), @@ -1071,7 +1065,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; - u32 port_cfg_addr, link_temp, val, nvm_cfg_addr; + u32 port_cfg_addr, link_temp, nvm_cfg_addr, device_capabilities; struct qed_mcp_link_params *link; /* Read global nvm_cfg address */ @@ -1134,21 +1128,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, break; } - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, func[MCP_PF_ID(p_hwfn)]) + - offsetof(struct nvm_cfg1_func, device_id); - val = qed_rd(p_hwfn, p_ptt, addr); - - if (IS_MF(p_hwfn)) { - p_hwfn->hw_info.device_id = - (val & NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK) >> - NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET; - } else { - p_hwfn->hw_info.device_id = - (val & NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK) >> - NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET; - } - /* Read default link configuration */ link = &p_hwfn->mcp_info->link_input; port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + @@ -1220,18 +1199,28 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, switch (mf_mode) { case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: - p_hwfn->cdev->mf_mode = MF_OVLAN; + p_hwfn->cdev->mf_mode = QED_MF_OVLAN; break; case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: - p_hwfn->cdev->mf_mode = MF_NPAR; + p_hwfn->cdev->mf_mode = QED_MF_NPAR; break; - case NVM_CFG1_GLOB_MF_MODE_FORCED_SF: - p_hwfn->cdev->mf_mode = SF; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + p_hwfn->cdev->mf_mode = QED_MF_DEFAULT; break; } DP_INFO(p_hwfn, "Multi function mode is %08x\n", p_hwfn->cdev->mf_mode); + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + offsetof(struct nvm_cfg1, glob) + + offsetof(struct nvm_cfg1_glob, device_capabilities); + + device_capabilities = qed_rd(p_hwfn, p_ptt, addr); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) + __set_bit(QED_DEV_CAP_ETH, + &p_hwfn->hw_info.device_capabilities); + return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt); } @@ -1293,29 +1282,36 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, static void qed_get_dev_info(struct qed_dev *cdev) { + struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); u32 tmp; - cdev->chip_num = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + /* Read Vendor Id / Device Id */ + pci_read_config_word(cdev->pdev, PCI_VENDOR_ID, + &cdev->vendor_id); + pci_read_config_word(cdev->pdev, PCI_DEVICE_ID, + &cdev->device_id); + cdev->chip_num = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_NUM); - cdev->chip_rev = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_rev = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_REV); MASK_FIELD(CHIP_REV, cdev->chip_rev); + cdev->type = QED_DEV_TYPE_BB; /* 
Learn number of HW-functions */ - tmp = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + tmp = qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CMT_ENABLED_FOR_PAIR); - if (tmp & (1 << cdev->hwfns[0].rel_pf_id)) { + if (tmp & (1 << p_hwfn->rel_pf_id)) { DP_NOTICE(cdev->hwfns, "device in CMT mode\n"); cdev->num_hwfns = 2; } else { cdev->num_hwfns = 1; } - cdev->chip_bond_id = qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_bond_id = qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_TEST_REG) >> 4; MASK_FIELD(CHIP_BOND_ID, cdev->chip_bond_id); - cdev->chip_metal = (u16)qed_rd(cdev->hwfns, cdev->hwfns[0].p_main_ptt, + cdev->chip_metal = (u16)qed_rd(p_hwfn, p_hwfn->p_main_ptt, MISCS_REG_CHIP_METAL); MASK_FIELD(CHIP_METAL, cdev->chip_metal); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 264e954675d1..49bbf696a16d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -34,6 +34,8 @@ enum common_event_opcode { COMMON_EVENT_RESERVED3, COMMON_EVENT_RESERVED4, COMMON_EVENT_RESERVED5, + COMMON_EVENT_RESERVED6, + COMMON_EVENT_EMPTY, MAX_COMMON_EVENT_OPCODE }; @@ -45,6 +47,7 @@ enum common_ramrod_cmd_id { COMMON_RAMROD_RESERVED, COMMON_RAMROD_RESERVED2, COMMON_RAMROD_RESERVED3, + COMMON_RAMROD_EMPTY, MAX_COMMON_RAMROD_CMD_ID }; @@ -331,6 +334,179 @@ struct xstorm_core_conn_ag_ctx { __le16 word15 /* word15 */; }; +struct tstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define 
TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 word1 /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define 
USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 word1 /* word1 */; + __le32 rx_producers /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; +}; + /* The core storm context for the Mstorm */ struct mstorm_core_conn_st_ctx { __le32 reserved[24]; @@ -349,8 +525,9 @@ struct core_conn_context { struct regpair pstorm_st_padding[2]; struct xstorm_core_conn_st_ctx xstorm_st_context; struct xstorm_core_conn_ag_ctx xstorm_ag_context; + struct tstorm_core_conn_ag_ctx tstorm_ag_context; + struct ustorm_core_conn_ag_ctx ustorm_ag_context; struct mstorm_core_conn_st_ctx mstorm_st_context; - struct regpair mstorm_st_padding[2]; struct ustorm_core_conn_st_ctx ustorm_st_context; struct regpair ustorm_st_padding[2] /* padding */; }; @@ -397,10 +574,12 @@ union event_ring_element { }; enum personality_type { + BAD_PERSONALITY_TYP, PERSONALITY_RESERVED, PERSONALITY_RESERVED2, PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */, PERSONALITY_RESERVED3, + PERSONALITY_CORE, PERSONALITY_ETH /* Ethernet */, PERSONALITY_RESERVED4, MAX_PERSONALITY_TYPE @@ -570,7 +749,7 @@ enum block_addr { GRCBASE_NWM = 0x800000, GRCBASE_NWS = 0x700000, GRCBASE_MS = 0x6a0000, - GRCBASE_PHY_PCIE = 0x618000, + GRCBASE_PHY_PCIE = 0x620000, GRCBASE_MISC_AEU = 0x8000, GRCBASE_BAR0_MAP = 0x1c00000, MAX_BLOCK_ADDR @@ -795,13 +974,13 @@ enum init_modes { MODE_RESERVED3, MODE_RESERVED4, MODE_RESERVED5, + MODE_RESERVED6, MODE_SF, MODE_MF_SD, MODE_MF_SI, MODE_PORTS_PER_ENG_1, MODE_PORTS_PER_ENG_2, MODE_PORTS_PER_ENG_4, - MODE_40G, MODE_100G, MODE_EAGLE_ENG1_WORKAROUND, 
MAX_INIT_MODES @@ -816,43 +995,6 @@ enum init_phases { MAX_INIT_PHASES }; -struct mstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0 /* word0 */; - __le16 word1 /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; -}; - /* per encapsulation type enabling flags */ struct prs_reg_encapsulation_type_en { u8 flags; @@ -927,250 +1069,44 @@ struct qm_rf_opportunistic_mask { }; /* QM hardware structure of QM map memory */ -struct qm_rf_pq_map { - u32 reg; -#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */ -#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 -#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */ -#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 -#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF -#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 -#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */ -#define QM_RF_PQ_MAP_VOQ_SHIFT 18 -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */ -#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 -#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */ -#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 -#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F -#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 -}; - -/* SDM operation gen command (generate aggregative interrupt) */ -struct sdm_op_gen { - __le32 command; -#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */ -#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 -#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */ -#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 -#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */ -#define SDM_OP_GEN_RESERVED_SHIFT 20 -}; - -struct tstorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 
3 -#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* 
rule8en */ -#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 word1 /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; +struct qm_rf_pq_map { + u32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */ +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */ +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */ +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */ +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */ +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 }; -struct ustorm_core_conn_ag_ctx { - u8 reserved /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 - u8 flags2; -#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define 
USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 word1 /* word1 */; - __le32 rx_producers /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; +/* Completion params for aggregated interrupt completion */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 }; -struct ystorm_core_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* word0 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le16 word1 /* word1 */; - __le16 word2 /* word2 */; - __le16 word3 /* word3 */; - __le16 word4 /* word4 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; +/* SDM operation gen command (generate aggregative interrupt) */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */ +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */ +#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 +#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */ +#define SDM_OP_GEN_RESERVED_SHIFT 20 }; 
/*********************************** Init ************************************/ @@ -1274,13 +1210,6 @@ enum chip_ids { MAX_CHIP_IDS }; -enum idle_chk_severity_types { - IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */, - IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC, - IDLE_CHK_SEVERITY_WARNING, - MAX_IDLE_CHK_SEVERITY_TYPES -}; - struct init_array_raw_hdr { __le32 data; #define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF @@ -1340,14 +1269,6 @@ struct init_callback_op { __le16 block_id /* Blocks ID */; }; -/* init comparison types */ -enum init_comparison_types { - INIT_COMPARISON_EQ /* init value is included in the init command */, - INIT_COMPARISON_OR /* init value is all zeros */, - INIT_COMPARISON_AND /* init value is an array of values */, - MAX_INIT_COMPARISON_TYPES -}; - /* init operation: delay */ struct init_delay_op { __le32 op_data; @@ -1444,12 +1365,10 @@ struct init_read_op { __le32 op_data; #define INIT_READ_OP_OP_MASK 0xF #define INIT_READ_OP_OP_SHIFT 0 -#define INIT_READ_OP_POLL_COMP_MASK 0x7 -#define INIT_READ_OP_POLL_COMP_SHIFT 4 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 #define INIT_READ_OP_RESERVED_MASK 0x1 -#define INIT_READ_OP_RESERVED_SHIFT 7 -#define INIT_READ_OP_POLL_MASK 0x1 -#define INIT_READ_OP_POLL_SHIFT 8 +#define INIT_READ_OP_RESERVED_SHIFT 8 #define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF #define INIT_READ_OP_ADDRESS_SHIFT 9 __le32 expected_val; @@ -1477,6 +1396,14 @@ enum init_op_types { MAX_INIT_OP_TYPES }; +enum init_poll_types { + INIT_POLL_NONE /* No polling */, + INIT_POLL_EQ /* init value is included in the init command */, + INIT_POLL_OR /* init value is all zeros */, + INIT_POLL_AND /* init value is an array of values */, + MAX_INIT_POLL_TYPES +}; + /* init source types */ enum init_source_types { INIT_SRC_INLINE /* init value is included in the init command */, @@ -1677,175 +1604,213 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, u16 num_pqs); /* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ -#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) -#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) /* Tstorm port statistics */ -#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + \ - ((port_id) * \ - IRO[1].m1)) -#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +/* Tstorm ll2 port statistics */ +#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \ + (IRO[2].base + ((port_id) * IRO[2].m1)) +#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size) /* Ustorm VF-PF Channel ready flag */ -#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) (IRO[2].base + \ - ((vf_id) * \ - IRO[2].m1)) -#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[2].size) +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ + (IRO[3].base + ((vf_id) * IRO[3].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) /* Ustorm Final flr cleanup ack */ -#define USTORM_FLR_FINAL_ACK_OFFSET (IRO[3].base) -#define USTORM_FLR_FINAL_ACK_SIZE (IRO[3].size) +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) /* Ustorm Event ring consumer */ -#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[4].base + \ - ((pf_id) * \ - IRO[4].m1)) -#define USTORM_EQE_CONS_SIZE (IRO[4].size) -/* Ustorm Completion ring consumer */ -#define USTORM_CQ_CONS_OFFSET(global_queue_id) (IRO[5].base + \ - ((global_queue_id) * \ - IRO[5].m1)) -#define USTORM_CQ_CONS_SIZE (IRO[5].size) +#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[5].size) +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \ + (IRO[6].base + ((global_queue_id) * IRO[6].m1)) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size) /* Xstorm Integration Test Data */ -#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[6].base) -#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[6].size) +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) /* Ystorm Integration Test Data */ -#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) -#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) /* Pstorm Integration Test Data */ -#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) -#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) /* Tstorm Integration Test Data */ -#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) -#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) /* Mstorm Integration Test Data */ -#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) -#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) /* Ustorm Integration Test Data */ -#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) -#define USTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) /* Tstorm producers */ -#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) (IRO[12].base + \ - 
((core_rx_queue_id) * \ - IRO[12].m1)) -#define TSTORM_LL2_RX_PRODS_SIZE (IRO[12].size) -/* Tstorm LiteL2 queue statistics */ -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[13].base + \ - ((core_rx_q_id) * \ - IRO[13].m1)) -#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[13].size) +#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \ + (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1)) +#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size) +/* Tstorm LightL2 queue statistics */ +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1)) +#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) /* Ustorm LiteL2 queue statistics */ -#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_q_id) (IRO[14].base + \ - ((core_rx_q_id) * \ - IRO[14].m1)) -#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[14].size) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \ + (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1)) +#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) /* Pstorm LiteL2 queue statistics */ -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_txst_id) (IRO[15].base + \ - ((core_txst_id) * \ - IRO[15].m1)) -#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[15].size) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \ + (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1)) +#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size) /* Mstorm queue statistics */ -#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[16].base + \ - ((stat_counter_id) * \ - IRO[16].m1)) -#define MSTORM_QUEUE_STAT_SIZE (IRO[16].size) +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[17].base + ((stat_counter_id) * IRO[17].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size) /* Mstorm producers */ -#define MSTORM_PRODS_OFFSET(queue_id) (IRO[17].base + \ - ((queue_id) * \ - IRO[17].m1)) -#define MSTORM_PRODS_SIZE (IRO[17].size) +#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1)) +#define MSTORM_PRODS_SIZE (IRO[18].size) /* TPA agregation timeout in us resolution (on ASIC) */ -#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[18].base) -#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[18].size) +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size) /* Ustorm queue statistics */ -#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[19].base + \ - ((stat_counter_id) * \ - IRO[19].m1)) -#define USTORM_QUEUE_STAT_SIZE (IRO[19].size) +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[20].base + ((stat_counter_id) * IRO[20].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[20].size) /* Ustorm queue zone */ -#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[20].base + \ - ((queue_id) * \ - IRO[20].m1)) -#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[20].size) +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[21].base + ((queue_id) * IRO[21].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size) /* Pstorm queue statistics */ -#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) (IRO[21].base + \ - ((stat_counter_id) * \ - IRO[21].m1)) -#define PSTORM_QUEUE_STAT_SIZE (IRO[21].size) +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ + (IRO[22].base + ((stat_counter_id) * IRO[22].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size) /* Tstorm last parser message */ -#define TSTORM_ETH_PRS_INPUT_OFFSET(pf_id) (IRO[22].base + \ - ((pf_id) * \ - IRO[22].m1)) -#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[22].size) +#define TSTORM_ETH_PRS_INPUT_OFFSET 
(IRO[23].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size) +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size) /* Ystorm queue zone */ -#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) (IRO[23].base + \ - ((queue_id) * \ - IRO[23].m1)) -#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[23].size) +#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ + (IRO[25].base + ((queue_id) * IRO[25].m1)) +#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size) /* Ystorm cqe producer */ -#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[24].base + \ - ((rss_id) * \ - IRO[24].m1)) -#define YSTORM_TOE_CQ_PROD_SIZE (IRO[24].size) +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[26].base + ((rss_id) * IRO[26].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size) /* Ustorm cqe producer */ -#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) (IRO[25].base + \ - ((rss_id) * \ - IRO[25].m1)) -#define USTORM_TOE_CQ_PROD_SIZE (IRO[25].size) +#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ + (IRO[27].base + ((rss_id) * IRO[27].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size) /* Ustorm grq producer */ -#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) (IRO[26].base + \ - ((pf_id) * \ - IRO[26].m1)) -#define USTORM_TOE_GRQ_PROD_SIZE (IRO[26].size) +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ + (IRO[28].base + ((pf_id) * IRO[28].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size) /* Tstorm cmdq-cons of given command queue-id */ -#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) (IRO[27].base + \ - ((cmdq_queue_id) * \ - IRO[27].m1)) -#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[27].size) +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ + (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size) /* Mstorm rq-cons of given queue-id */ -#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) (IRO[28].base + \ - ((rq_queue_id) * \ - IRO[28].m1)) -#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[28].size) +#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \ + (IRO[30].base + ((rq_queue_id) * IRO[30].m1)) +#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size) +/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size) +/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */ +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ + (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size) +/* Tstorm iSCSI RX stats */ +#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[33].base + ((pf_id) * IRO[33].m1)) +#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size) +/* Mstorm iSCSI RX stats */ +#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[34].base + ((pf_id) * IRO[34].m1)) +#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size) +/* Ustorm iSCSI RX stats */ +#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \ + (IRO[35].base + ((pf_id) * IRO[35].m1)) +#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size) +/* Xstorm iSCSI TX stats */ +#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[36].base + ((pf_id) * IRO[36].m1)) +#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size) +/* Ystorm iSCSI TX stats */ +#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[37].base + ((pf_id) * IRO[37].m1)) +#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size) +/* Pstorm iSCSI TX stats */ 
+#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \ + (IRO[38].base + ((pf_id) * IRO[38].m1)) +#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size) +/* Tstorm FCoE RX stats */ +#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[39].base + ((pf_id) * IRO[39].m1)) +#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size) +/* Mstorm FCoE RX stats */ +#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \ + (IRO[40].base + ((pf_id) * IRO[40].m1)) +#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size) +/* Pstorm FCoE TX stats */ +#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \ + (IRO[41].base + ((pf_id) * IRO[41].m1)) +#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size) /* Pstorm RoCE statistics */ -#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[29].base + \ - ((stat_counter_id) * \ - IRO[29].m1)) -#define PSTORM_ROCE_STAT_SIZE (IRO[29].size) +#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ + (IRO[42].base + ((stat_counter_id) * IRO[42].m1)) +#define PSTORM_ROCE_STAT_SIZE (IRO[42].size) /* Tstorm RoCE statistics */ -#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) (IRO[30].base + \ - ((stat_counter_id) * \ - IRO[30].m1)) -#define TSTORM_ROCE_STAT_SIZE (IRO[30].size) - -static const struct iro iro_arr[31] = { - { 0x10, 0x0, 0x0, 0x0, 0x8 }, - { 0x4448, 0x60, 0x0, 0x0, 0x60 }, - { 0x498, 0x8, 0x0, 0x0, 0x4 }, - { 0x494, 0x0, 0x0, 0x0, 0x4 }, - { 0x10, 0x8, 0x0, 0x0, 0x2 }, - { 0x90, 0x8, 0x0, 0x0, 0x2 }, - { 0x4540, 0x0, 0x0, 0x0, 0xf8 }, - { 0x39e0, 0x0, 0x0, 0x0, 0xf8 }, - { 0x2598, 0x0, 0x0, 0x0, 0xf8 }, - { 0x4350, 0x0, 0x0, 0x0, 0xf8 }, - { 0x52d0, 0x0, 0x0, 0x0, 0xf8 }, - { 0x7a48, 0x0, 0x0, 0x0, 0xf8 }, - { 0x100, 0x8, 0x0, 0x0, 0x8 }, - { 0x5808, 0x10, 0x0, 0x0, 0x10 }, - { 0xb100, 0x30, 0x0, 0x0, 0x30 }, - { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, - { 0x54f8, 0x40, 0x0, 0x0, 0x40 }, - { 0x200, 0x10, 0x0, 0x0, 0x8 }, - { 0x9e70, 0x0, 0x0, 0x0, 0x4 }, - { 0x7ca0, 0x40, 0x0, 0x0, 0x30 }, - { 0xd00, 0x8, 0x0, 0x0, 0x8 }, - { 0x2790, 0x80, 0x0, 0x0, 0x38 }, - { 0xa520, 0xf0, 0x0, 0x0, 0xf0 }, - { 0x80, 0x8, 0x0, 0x0, 0x8 }, - { 0xac0, 0x8, 0x0, 0x0, 0x8 }, - { 0x2580, 0x8, 0x0, 0x0, 0x8 }, - { 0x2500, 0x8, 0x0, 0x0, 0x8 }, - { 0x440, 0x8, 0x0, 0x0, 0x2 }, - { 0x1800, 0x8, 0x0, 0x0, 0x2 }, - { 0x27c8, 0x80, 0x0, 0x0, 0x10 }, - { 0x4710, 0x10, 0x0, 0x0, 0x10 }, +#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \ + (IRO[43].base + ((stat_counter_id) * IRO[43].m1)) +#define TSTORM_ROCE_STAT_SIZE (IRO[43].size) + +static const struct iro iro_arr[44] = { + { 0x10, 0x0, 0x0, 0x0, 0x8 }, + { 0x47c8, 0x60, 0x0, 0x0, 0x60 }, + { 0x5e30, 0x20, 0x0, 0x0, 0x20 }, + { 0x510, 0x8, 0x0, 0x0, 0x4 }, + { 0x490, 0x8, 0x0, 0x0, 0x4 }, + { 0x10, 0x8, 0x0, 0x0, 0x2 }, + { 0x90, 0x8, 0x0, 0x0, 0x2 }, + { 0x4940, 0x0, 0x0, 0x0, 0x78 }, + { 0x3de0, 0x0, 0x0, 0x0, 0x78 }, + { 0x2998, 0x0, 0x0, 0x0, 0x78 }, + { 0x4750, 0x0, 0x0, 0x0, 0x78 }, + { 0x56d0, 0x0, 0x0, 0x0, 0x78 }, + { 0x7e50, 0x0, 0x0, 0x0, 0x78 }, + { 0x100, 0x8, 0x0, 0x0, 0x8 }, + { 0x5c10, 0x10, 0x0, 0x0, 0x10 }, + { 0xb508, 0x30, 0x0, 0x0, 0x30 }, + { 0x95c0, 0x30, 0x0, 0x0, 0x30 }, + { 0x58a0, 0x40, 0x0, 0x0, 0x40 }, + { 0x200, 0x10, 0x0, 0x0, 0x8 }, + { 0xa230, 0x0, 0x0, 0x0, 0x4 }, + { 0x8058, 0x40, 0x0, 0x0, 0x30 }, + { 0xd00, 0x8, 0x0, 0x0, 0x8 }, + { 0x2b30, 0x80, 0x0, 0x0, 0x38 }, + { 0xa808, 0x0, 0x0, 0x0, 0xf0 }, + { 0xa8f8, 0x8, 0x0, 0x0, 0x8 }, + { 0x80, 0x8, 0x0, 0x0, 0x8 }, + { 0xac0, 0x8, 0x0, 0x0, 0x8 }, + { 0x2580, 0x8, 0x0, 0x0, 0x8 }, + { 0x2500, 0x8, 0x0, 0x0, 0x8 }, + { 0x440, 0x8, 0x0, 0x0, 0x2 }, + { 0x1800, 0x8, 0x0, 0x0, 0x2 }, + { 0x1a00, 0x10, 0x8, 0x0, 0x2 }, 
+ { 0x640, 0x10, 0x8, 0x0, 0x2 }, + { 0xd9b8, 0x38, 0x0, 0x0, 0x24 }, + { 0x11048, 0x10, 0x0, 0x0, 0x8 }, + { 0x11678, 0x38, 0x0, 0x0, 0x18 }, + { 0xaec0, 0x30, 0x0, 0x0, 0x10 }, + { 0x8700, 0x28, 0x0, 0x0, 0x18 }, + { 0xec00, 0x10, 0x0, 0x0, 0x10 }, + { 0xde38, 0x40, 0x0, 0x0, 0x30 }, + { 0x121a8, 0x38, 0x0, 0x0, 0x8 }, + { 0xf068, 0x20, 0x0, 0x0, 0x20 }, + { 0x2b68, 0x80, 0x0, 0x0, 0x10 }, + { 0x4ab8, 0x10, 0x0, 0x0, 0x10 }, }; /* Runtime array offsets */ @@ -1866,426 +1831,427 @@ static const struct iro iro_arr[31] = { #define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 #define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 #define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 -#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 17 -#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 18 -#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 19 -#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 20 -#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 21 -#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 22 -#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 23 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 #define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 760 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 #define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 -#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1496 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 #define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 -#define CAU_REG_PI_MEMORY_RT_OFFSET 2232 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 #define CAU_REG_PI_MEMORY_RT_SIZE 4416 -#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6648 -#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6649 -#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6650 -#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6651 -#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6652 -#define PRS_REG_SEARCH_TCP_RT_OFFSET 6653 -#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6654 -#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6655 -#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6656 -#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6657 -#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6658 -#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6659 -#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6660 -#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6661 -#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6662 -#define SRC_REG_FIRSTFREE_RT_OFFSET 6663 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655 +#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656 +#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657 +#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define 
PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 #define SRC_REG_FIRSTFREE_RT_SIZE 2 -#define SRC_REG_LASTFREE_RT_OFFSET 6665 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 #define SRC_REG_LASTFREE_RT_SIZE 2 -#define SRC_REG_COUNTFREE_RT_OFFSET 6667 -#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6668 -#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6669 -#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6670 -#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6671 -#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6672 -#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6673 -#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6674 -#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6675 -#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6676 -#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6677 -#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6678 -#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6679 -#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6680 -#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6681 -#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6682 -#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6683 -#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6684 -#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6685 -#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6686 -#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6687 -#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 -#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6689 -#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6690 -#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6691 -#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6692 -#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6693 -#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6694 -#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6695 -#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6696 -#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6697 -#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6698 -#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6699 -#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6700 -#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6701 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700 +#define 
PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703 #define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 -#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28701 -#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28702 -#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28703 -#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28704 -#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28705 -#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28706 -#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28707 -#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28708 -#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28709 -#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28710 -#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28711 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713 #define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 -#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29127 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129 #define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 -#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29639 -#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29640 -#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29641 -#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29642 -#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29643 -#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29644 -#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29645 -#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29646 -#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29647 -#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29648 -#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29649 -#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29650 -#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29651 -#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29652 -#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29653 -#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29654 -#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29655 -#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29656 -#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29657 -#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29658 -#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29659 -#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29660 -#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29661 -#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29662 -#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29663 -#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29664 -#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29665 -#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29666 -#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29667 -#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29668 -#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29669 -#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29670 -#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29671 -#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29672 -#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29673 -#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29674 -#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29675 -#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29676 -#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29677 -#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29678 -#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29679 -#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29680 -#define 
QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29681 -#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29682 -#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29683 -#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29684 -#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29685 -#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29686 -#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29687 -#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29688 -#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29689 -#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29690 -#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29691 -#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29692 -#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29693 -#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29694 -#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29695 -#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29696 -#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29697 -#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29698 -#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29699 -#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29700 -#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29701 -#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29702 -#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29703 -#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29704 -#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29705 -#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 
29686 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708 #define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 -#define QM_REG_VOQCRDLINE_RT_OFFSET 29834 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29836 #define QM_REG_VOQCRDLINE_RT_SIZE 20 -#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29854 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856 #define QM_REG_VOQINITCRDLINE_RT_SIZE 20 -#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29874 -#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29875 -#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29876 -#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29877 -#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29878 -#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29879 -#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29880 -#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29881 -#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29882 -#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29883 -#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29884 -#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29885 -#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29886 -#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29887 -#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29888 -#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29889 -#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29890 -#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29891 -#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29892 -#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29893 -#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29894 -#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29895 -#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29896 -#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29897 -#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29898 -#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29899 -#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29900 -#define QM_REG_PQTX2PF_0_RT_OFFSET 29901 -#define QM_REG_PQTX2PF_1_RT_OFFSET 29902 -#define QM_REG_PQTX2PF_2_RT_OFFSET 29903 -#define QM_REG_PQTX2PF_3_RT_OFFSET 29904 -#define QM_REG_PQTX2PF_4_RT_OFFSET 29905 -#define QM_REG_PQTX2PF_5_RT_OFFSET 29906 -#define QM_REG_PQTX2PF_6_RT_OFFSET 29907 -#define QM_REG_PQTX2PF_7_RT_OFFSET 29908 -#define QM_REG_PQTX2PF_8_RT_OFFSET 29909 -#define QM_REG_PQTX2PF_9_RT_OFFSET 29910 -#define QM_REG_PQTX2PF_10_RT_OFFSET 29911 -#define QM_REG_PQTX2PF_11_RT_OFFSET 29912 -#define QM_REG_PQTX2PF_12_RT_OFFSET 29913 -#define QM_REG_PQTX2PF_13_RT_OFFSET 29914 -#define QM_REG_PQTX2PF_14_RT_OFFSET 29915 -#define QM_REG_PQTX2PF_15_RT_OFFSET 29916 -#define QM_REG_PQTX2PF_16_RT_OFFSET 29917 -#define QM_REG_PQTX2PF_17_RT_OFFSET 29918 -#define QM_REG_PQTX2PF_18_RT_OFFSET 29919 
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29920 -#define QM_REG_PQTX2PF_20_RT_OFFSET 29921 -#define QM_REG_PQTX2PF_21_RT_OFFSET 29922 -#define QM_REG_PQTX2PF_22_RT_OFFSET 29923 -#define QM_REG_PQTX2PF_23_RT_OFFSET 29924 -#define QM_REG_PQTX2PF_24_RT_OFFSET 29925 -#define QM_REG_PQTX2PF_25_RT_OFFSET 29926 -#define QM_REG_PQTX2PF_26_RT_OFFSET 29927 -#define QM_REG_PQTX2PF_27_RT_OFFSET 29928 -#define QM_REG_PQTX2PF_28_RT_OFFSET 29929 -#define QM_REG_PQTX2PF_29_RT_OFFSET 29930 -#define QM_REG_PQTX2PF_30_RT_OFFSET 29931 -#define QM_REG_PQTX2PF_31_RT_OFFSET 29932 -#define QM_REG_PQTX2PF_32_RT_OFFSET 29933 -#define QM_REG_PQTX2PF_33_RT_OFFSET 29934 -#define QM_REG_PQTX2PF_34_RT_OFFSET 29935 -#define QM_REG_PQTX2PF_35_RT_OFFSET 29936 -#define QM_REG_PQTX2PF_36_RT_OFFSET 29937 -#define QM_REG_PQTX2PF_37_RT_OFFSET 29938 -#define QM_REG_PQTX2PF_38_RT_OFFSET 29939 -#define QM_REG_PQTX2PF_39_RT_OFFSET 29940 -#define QM_REG_PQTX2PF_40_RT_OFFSET 29941 -#define QM_REG_PQTX2PF_41_RT_OFFSET 29942 -#define QM_REG_PQTX2PF_42_RT_OFFSET 29943 -#define QM_REG_PQTX2PF_43_RT_OFFSET 29944 -#define QM_REG_PQTX2PF_44_RT_OFFSET 29945 -#define QM_REG_PQTX2PF_45_RT_OFFSET 29946 -#define QM_REG_PQTX2PF_46_RT_OFFSET 29947 -#define QM_REG_PQTX2PF_47_RT_OFFSET 29948 -#define QM_REG_PQTX2PF_48_RT_OFFSET 29949 -#define QM_REG_PQTX2PF_49_RT_OFFSET 29950 -#define QM_REG_PQTX2PF_50_RT_OFFSET 29951 -#define QM_REG_PQTX2PF_51_RT_OFFSET 29952 -#define QM_REG_PQTX2PF_52_RT_OFFSET 29953 -#define QM_REG_PQTX2PF_53_RT_OFFSET 29954 -#define QM_REG_PQTX2PF_54_RT_OFFSET 29955 -#define QM_REG_PQTX2PF_55_RT_OFFSET 29956 -#define QM_REG_PQTX2PF_56_RT_OFFSET 29957 -#define QM_REG_PQTX2PF_57_RT_OFFSET 29958 -#define QM_REG_PQTX2PF_58_RT_OFFSET 29959 -#define QM_REG_PQTX2PF_59_RT_OFFSET 29960 -#define QM_REG_PQTX2PF_60_RT_OFFSET 29961 -#define QM_REG_PQTX2PF_61_RT_OFFSET 29962 -#define QM_REG_PQTX2PF_62_RT_OFFSET 29963 -#define QM_REG_PQTX2PF_63_RT_OFFSET 29964 -#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29965 -#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29966 -#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29967 -#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29968 -#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29969 -#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29970 -#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29971 -#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29972 -#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29973 -#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29974 -#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29975 -#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29976 -#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29977 -#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29978 -#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29979 -#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29980 -#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29981 -#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29982 -#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29983 -#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29984 -#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29985 -#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29986 -#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29987 -#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29988 -#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29989 -#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29990 -#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29991 -#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29992 -#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29993 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 
29880 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29903 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29904 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29920 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_20_RT_OFFSET 29923 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29929 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29935 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29959 +#define 
QM_REG_PQTX2PF_57_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29966 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995 #define QM_REG_RLGLBLINCVAL_RT_SIZE 256 -#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30249 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251 #define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 -#define QM_REG_RLGLBLCRD_RT_OFFSET 30505 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30507 #define QM_REG_RLGLBLCRD_RT_SIZE 256 -#define QM_REG_RLGLBLENABLE_RT_OFFSET 30761 -#define QM_REG_RLPFPERIOD_RT_OFFSET 30762 -#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30763 -#define QM_REG_RLPFINCVAL_RT_OFFSET 30764 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30764 +#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30766 #define QM_REG_RLPFINCVAL_RT_SIZE 16 -#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30780 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782 #define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_RLPFCRD_RT_OFFSET 30796 +#define QM_REG_RLPFCRD_RT_OFFSET 30798 #define QM_REG_RLPFCRD_RT_SIZE 16 -#define QM_REG_RLPFENABLE_RT_OFFSET 30812 -#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30813 -#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30814 +#define QM_REG_RLPFENABLE_RT_OFFSET 30814 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816 #define QM_REG_WFQPFWEIGHT_RT_SIZE 16 -#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30830 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832 #define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 -#define QM_REG_WFQPFCRD_RT_OFFSET 30846 +#define QM_REG_WFQPFCRD_RT_OFFSET 30848 #define QM_REG_WFQPFCRD_RT_SIZE 160 -#define QM_REG_WFQPFENABLE_RT_OFFSET 31006 -#define QM_REG_WFQVPENABLE_RT_OFFSET 31007 -#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31008 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31008 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31009 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010 #define QM_REG_BASEADDRTXPQ_RT_SIZE 512 -#define QM_REG_TXPQMAP_RT_OFFSET 31520 +#define QM_REG_TXPQMAP_RT_OFFSET 31522 
#define QM_REG_TXPQMAP_RT_SIZE 512 -#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32032 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034 #define QM_REG_WFQVPWEIGHT_RT_SIZE 512 -#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32544 -#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512 -#define QM_REG_WFQVPCRD_RT_OFFSET 33056 +#define QM_REG_WFQVPCRD_RT_OFFSET 32546 #define QM_REG_WFQVPCRD_RT_SIZE 512 -#define QM_REG_WFQVPMAP_RT_OFFSET 33568 +#define QM_REG_WFQVPMAP_RT_OFFSET 33058 #define QM_REG_WFQVPMAP_RT_SIZE 512 -#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34080 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570 #define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 -#define NIG_REG_LLH_CLS_TYPE_DUALMODE_RT_OFFSET 34240 -#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34241 -#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34242 -#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34243 -#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34244 -#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 34245 -#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34246 -#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34247 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737 #define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 34251 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741 #define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34255 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745 #define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 -#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 34259 -#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34260 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750 #define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 -#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34292 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782 #define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34308 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798 #define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34324 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814 #define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 -#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34340 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830 #define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 -#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34356 -#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34357 -#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34358 -#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34359 -#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34360 -#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34361 -#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34362 -#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34363 -#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34364 -#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34365 -#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34366 -#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34367 -#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34368 -#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34369 -#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34370 -#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34371 -#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34372 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34373 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34374 -#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34375 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34376 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34377 -#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34378 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34379 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34380 -#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34381 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34382 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34383 -#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34384 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34385 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34386 -#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34387 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34388 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34389 -#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34390 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34391 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34392 -#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34393 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34394 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34395 -#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34396 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34397 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34398 -#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34399 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34400 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34401 -#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34402 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34403 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34404 -#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34405 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34406 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34407 -#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34408 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34409 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34410 -#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34411 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34412 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34413 -#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34414 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34415 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34416 -#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34417 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34418 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34419 -#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34420 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34421 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34422 -#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34423 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34424 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34425 -#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34426 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34427 -#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34428 -#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34429 -#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34430 -#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34431 - -#define RUNTIME_ARRAY_SIZE 34432 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846 +#define 
CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865 +#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909 +#define 
PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922 + +#define RUNTIME_ARRAY_SIZE 33923 -/* The eth storm context for the Ystorm */ -struct ystorm_eth_conn_st_ctx { +/* The eth storm context for the Tstorm */ +struct tstorm_eth_conn_st_ctx { __le32 reserved[4]; }; @@ -2535,41 +2501,253 @@ struct xstorm_eth_conn_ag_ctx { __le32 reg7 /* reg7 */; __le32 reg8 /* reg8 */; __le32 reg9 /* reg9 */; - u8 byte7 /* byte7 */; - u8 byte8 /* byte8 */; - u8 byte9 /* byte9 */; - u8 byte10 /* byte10 */; - u8 byte11 /* byte11 */; - u8 byte12 /* byte12 */; - u8 byte13 /* byte13 */; - u8 byte14 /* byte14 */; - u8 byte15 /* byte15 */; - u8 byte16 /* byte16 */; - __le16 word11 /* word11 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 byte16 /* byte16 */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +/* The eth storm context for the Ystorm */ +struct ystorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +struct ystorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */ +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */ +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */ +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define 
YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 terminate_spqe /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 tx_bd_cons_upd /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +struct tstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ +#define 
TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 rx_bd_cons /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 rx_bd_prod /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; __le32 reg10 /* reg10 */; - __le32 reg11 /* reg11 */; - __le32 reg12 /* reg12 */; - __le32 reg13 /* reg13 */; - __le32 reg14 /* reg14 */; - __le32 reg15 /* reg15 */; - __le32 reg16 /* reg16 */; - __le32 reg17 /* reg17 */; - __le32 reg18 /* reg18 */; - __le32 reg19 /* reg19 */; - __le16 word12 /* word12 */; - __le16 word13 /* word13 */; - __le16 word14 /* word14 */; - __le16 word15 /* word15 */; -}; - -/* The eth storm context for the Tstorm */ -struct tstorm_eth_conn_st_ctx { - __le32 reserved[4]; }; -/* The eth storm context for the Mstorm */ -struct mstorm_eth_conn_st_ctx { - __le32 reserved[8]; +struct ustorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */ +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */ +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 + u8 flags2; +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */ +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */ +#define 
USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 tx_bd_cons /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 tx_int_coallecing_timeset /* reg3 */; + __le16 tx_drv_bd_cons /* word2 */; + __le16 rx_drv_cqe_cons /* word3 */; }; /* The eth storm context for the Ustorm */ @@ -2577,24 +2755,30 @@ struct ustorm_eth_conn_st_ctx { __le32 reserved[40]; }; +/* The eth storm context for the Mstorm */ +struct mstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + /* eth connection context */ struct eth_conn_context { - struct ystorm_eth_conn_st_ctx ystorm_st_context; - struct regpair ystorm_st_padding[2] /* padding */; + struct tstorm_eth_conn_st_ctx tstorm_st_context; + struct regpair tstorm_st_padding[2]; struct pstorm_eth_conn_st_ctx pstorm_st_context; - struct regpair pstorm_st_padding[2] /* padding */; struct xstorm_eth_conn_st_ctx xstorm_st_context; struct xstorm_eth_conn_ag_ctx xstorm_ag_context; - struct tstorm_eth_conn_st_ctx tstorm_st_context; - struct regpair tstorm_st_padding[2] /* padding */; - struct mstorm_eth_conn_st_ctx mstorm_st_context; + struct ystorm_eth_conn_st_ctx ystorm_st_context; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context; struct ustorm_eth_conn_st_ctx ustorm_st_context; + struct mstorm_eth_conn_st_ctx mstorm_st_context; }; enum eth_filter_action { ETH_FILTER_ACTION_REMOVE, ETH_FILTER_ACTION_ADD, - ETH_FILTER_ACTION_REPLACE, + ETH_FILTER_ACTION_REMOVE_ALL, MAX_ETH_FILTER_ACTION }; @@ -2653,6 +2837,32 @@ enum eth_ramrod_cmd_id { MAX_ETH_RAMROD_CMD_ID }; +enum eth_tx_err { + ETH_TX_ERR_DROP /* Drop erronous packet. 
*/, + ETH_TX_ERR_ASSERT_MALICIOUS, + MAX_ETH_TX_ERR +}; + +struct eth_tx_err_vals { + __le16 values; +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 +}; + struct eth_vport_rss_config { __le16 capabilities; #define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 @@ -2669,12 +2879,8 @@ struct eth_vport_rss_config { #define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 #define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_TCP_FRAG_SHIFT 7 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_MASK 0x1 -#define ETH_VPORT_RSS_CONFIG_CALC_4TUP_UDP_FRAG_SHIFT 8 -#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x7F -#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 9 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 u8 rss_id; u8 rss_mode; u8 update_rss_key; @@ -2749,10 +2955,14 @@ struct rx_queue_start_ramrod_data { u8 pxp_tph_valid_pkt; u8 pxp_st_hint; __le16 pxp_st_index; - u8 reserved[4]; - struct regpair cqe_pbl_addr; - struct regpair bd_base; - struct regpair sge_base; + u8 pmd_mode; + u8 notify_en; + u8 toggle_val; + u8 reserved[7]; + __le16 reserved1; + struct regpair cqe_pbl_addr; + struct regpair bd_base; + struct regpair reserved2; }; struct rx_queue_stop_ramrod_data { @@ -2764,23 +2974,24 @@ struct rx_queue_stop_ramrod_data { }; struct rx_queue_update_ramrod_data { - __le16 rx_queue_id; - u8 complete_cqe_flg; - u8 complete_event_flg; - u8 init_sge_ring_flg; - u8 vport_id; - u8 pxp_tph_valid_sge; - u8 pxp_st_hint; - __le16 pxp_st_index; - u8 reserved[6]; - struct regpair sge_base; + __le16 rx_queue_id; + u8 complete_cqe_flg; + u8 complete_event_flg; + u8 vport_id; + u8 reserved[4]; + u8 reserved1; + u8 reserved2; + u8 reserved3; + __le16 reserved4; + __le16 reserved5; + struct regpair reserved6; }; struct tx_queue_start_ramrod_data { __le16 sb_id; u8 sb_index; u8 vport_id; - u8 tc; + u8 reserved0; u8 stats_counter_id; __le16 qm_pq_id; u8 flags; @@ -2790,18 +3001,25 @@ struct tx_queue_start_ramrod_data { #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 #define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 -#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_MASK 0x1F -#define TX_QUEUE_START_RAMROD_DATA_RESERVED0_SHIFT 3 - u8 pin_context; - u8 pxp_tph_valid_bd; - u8 pxp_tph_valid_pkt; - __le16 pxp_st_index; - u8 pxp_st_hint; - u8 reserved1[3]; - __le16 queue_zone_id; - __le16 test_dup_count; - __le16 pbl_size; - struct regpair pbl_base_addr; +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3 +#define 
TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6 + u8 pxp_st_hint; + u8 pxp_tph_valid_bd; + u8 pxp_tph_valid_pkt; + __le16 pxp_st_index; + __le16 comp_agg_size; + __le16 queue_zone_id; + __le16 test_dup_count; + __le16 pbl_size; + __le16 tx_queue_id; + struct regpair pbl_base_addr; + struct regpair bd_cons_address; }; struct tx_queue_stop_ramrod_data { @@ -2822,16 +3040,16 @@ struct vport_start_ramrod_data { struct eth_vport_rx_mode rx_mode; struct eth_vport_tx_mode tx_mode; struct eth_vport_tpa_param tpa_param; - __le16 sge_buff_size; - u8 max_sges_num; - u8 tx_switching_en; - u8 anti_spoofing_en; - u8 default_vlan_en; - u8 handle_ptp_pkts; - u8 silent_vlan_removal_en; - __le16 default_vlan; - u8 untagged; - u8 reserved[7]; + __le16 default_vlan; + u8 tx_switching_en; + u8 anti_spoofing_en; + u8 default_vlan_en; + u8 handle_ptp_pkts; + u8 silent_vlan_removal_en; + u8 untagged; + struct eth_tx_err_vals tx_err_behav; + u8 zero_placement_offset; + u8 reserved[7]; }; struct vport_stop_ramrod_data { @@ -2840,36 +3058,35 @@ struct vport_stop_ramrod_data { }; struct vport_update_ramrod_data_cmn { - u8 vport_id; - u8 update_rx_active_flg; - u8 rx_active_flg; - u8 update_tx_active_flg; - u8 tx_active_flg; - u8 update_rx_mode_flg; - u8 update_tx_mode_flg; - u8 update_approx_mcast_flg; - u8 update_rss_flg; - u8 update_inner_vlan_removal_en_flg; - u8 inner_vlan_removal_en; - u8 update_tpa_param_flg; - u8 update_tpa_en_flg; - u8 update_sge_param_flg; - __le16 sge_buff_size; - u8 max_sges_num; - u8 update_tx_switching_en_flg; - u8 tx_switching_en; - u8 update_anti_spoofing_en_flg; - u8 anti_spoofing_en; - u8 update_handle_ptp_pkts; - u8 handle_ptp_pkts; - u8 update_default_vlan_en_flg; - u8 default_vlan_en; - u8 update_default_vlan_flg; - __le16 default_vlan; - u8 update_accept_any_vlan_flg; - u8 accept_any_vlan; - u8 silent_vlan_removal_en; - u8 reserved; + u8 vport_id; + u8 update_rx_active_flg; + u8 rx_active_flg; + u8 update_tx_active_flg; + u8 tx_active_flg; + u8 update_rx_mode_flg; + u8 update_tx_mode_flg; + u8 update_approx_mcast_flg; + u8 update_rss_flg; + u8 update_inner_vlan_removal_en_flg; + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg; + u8 update_tx_switching_en_flg; + u8 tx_switching_en; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_handle_ptp_pkts; + u8 handle_ptp_pkts; + u8 update_default_vlan_en_flg; + u8 default_vlan_en; + u8 update_default_vlan_flg; + __le16 default_vlan; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + u8 silent_vlan_removal_en; + u8 update_mtu_flg; + __le16 mtu; + u8 reserved[2]; }; struct vport_update_ramrod_mcast { @@ -2885,436 +3102,6 @@ struct vport_update_ramrod_data { struct eth_vport_rss_config rss_config; }; -struct mstorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 /* exist_in_qm0 */ -#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* cf0 */ -#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* cf1 */ -#define 
MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */ -#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 - __le16 word0 /* word0 */; - __le16 word1 /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; -}; - -struct tstorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */ -#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 - u8 flags1; -#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */ -#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */ -#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 - u8 flags2; -#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */ -#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */ -#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */ -#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */ -#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 - u8 flags3; -#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */ -#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */ -#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 - u8 flags4; -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */ -#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 
0 -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */ -#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */ -#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */ -#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */ -#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */ -#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */ -#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags5; -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */ -#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le32 reg4 /* reg4 */; - __le32 reg5 /* reg5 */; - __le32 reg6 /* reg6 */; - __le32 reg7 /* reg7 */; - __le32 reg8 /* reg8 */; - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 rx_bd_cons /* word0 */; - u8 byte4 /* byte4 */; - u8 byte5 /* byte5 */; - __le16 rx_bd_prod /* word1 */; - __le16 word2 /* conn_dpi */; - __le16 word3 /* word3 */; - __le32 reg9 /* reg9 */; - __le32 reg10 /* reg10 */; -}; - -struct ustorm_eth_conn_ag_ctx { - u8 byte0 /* cdu_validation */; - u8 byte1 /* state */; - u8 flags0; -#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 -#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */ -#define USTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */ -#define USTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */ -#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 - u8 flags1; -#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 -#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 - u8 flags2; -#define USTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */ -#define USTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */ -#define USTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */ -#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 -#define 
USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */ -#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */ -#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */ -#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */ -#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */ -#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 - u8 flags3; -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */ -#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */ -#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */ -#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */ -#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */ -#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */ -#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */ -#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */ -#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 - u8 byte2 /* byte2 */; - u8 byte3 /* byte3 */; - __le16 word0 /* conn_dpi */; - __le16 tx_bd_cons /* word1 */; - __le32 reg0 /* reg0 */; - __le32 reg1 /* reg1 */; - __le32 reg2 /* reg2 */; - __le32 reg3 /* reg3 */; - __le16 tx_drv_bd_cons /* word2 */; - __le16 rx_drv_cqe_cons /* word3 */; -}; - -struct xstorm_eth_hw_conn_ag_ctx { - u8 reserved0 /* cdu_validation */; - u8 eth_state /* state */; - u8 flags0; -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 - u8 flags1; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 -#define 
XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 - u8 flags2; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 - u8 flags3; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 - u8 flags4; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 - u8 flags5; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 - u8 flags6; -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 - u8 flags7; -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 - u8 flags8; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 - u8 flags9; -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 
0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 - u8 flags10; -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 - u8 flags11; -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 - u8 flags12; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 - u8 flags13; -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 -#define 
XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 - u8 flags14; -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 -#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 -#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 -#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 -#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 - u8 edpm_event_id /* byte2 */; - __le16 physical_q0 /* physical_q0 */; - __le16 word1 /* physical_q1 */; - __le16 edpm_num_bds /* physical_q2 */; - __le16 tx_bd_cons /* word3 */; - __le16 tx_bd_prod /* word4 */; - __le16 go_to_bd_cons /* word5 */; - __le16 conn_dpi /* conn_dpi */; -}; - #define VF_MAX_STATIC 192 /* In case of K2 */ #define MCP_GLOB_PATH_MAX 2 @@ -3818,6 +3605,10 @@ struct public_port { struct dcbx_local_params local_admin_dcbx_mib; struct dcbx_mib remote_dcbx_mib; struct dcbx_mib operational_dcbx_mib; + + u32 fc_npiv_nvram_tbl_addr; + u32 fc_npiv_nvram_tbl_size; + u32 transceiver_data; }; /**************************************/ @@ -3830,7 +3621,11 @@ struct public_func { u32 iscsi_boot_signature; u32 iscsi_boot_block_offset; - u32 reserved[8]; + u32 mtu_size; + u32 c2s_pcp_map_lower; + u32 c2s_pcp_map_upper; + u32 c2s_pcp_map_default; + u32 reserved[4]; u32 config; @@ -3894,10 +3689,10 @@ struct public_func { #define DRV_ID_MCP_HSI_VER_SHIFT 16 #define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT) -#define DRV_ID_DRV_TYPE_MASK 0xff000000 +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 #define DRV_ID_DRV_TYPE_SHIFT 24 #define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) -#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT) @@ -3905,6 +3700,10 @@ struct public_func { #define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT) #define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG 
BIT(DRV_ID_DRV_INIT_HW_SHIFT) }; /**************************************/ @@ -3964,6 +3763,7 @@ struct public_drv_mb { #define DRV_MSG_CODE_MASK 0xffff0000 #define DRV_MSG_CODE_LOAD_REQ 0x10000000 #define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_INIT_HW 0x12000000 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 #define DRV_MSG_CODE_INIT_PHY 0x22000000 @@ -4100,6 +3900,7 @@ struct public_drv_mb { #define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 #define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 #define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 +#define FW_MSG_CODE_OK 0x00160000 #define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff @@ -4212,7 +4013,7 @@ struct nvm_cfg1_glob { #define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 #define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 #define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 -#define NVM_CFG1_GLOB_MF_MODE_FORCED_SF 0x1 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 #define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 #define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 #define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 @@ -4643,8 +4444,12 @@ struct nvm_cfg1_glob { #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F #define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 - - u32 reserved[46]; /* 0x88 */ + u32 device_capabilities; /* 0x88 */ +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 + u32 power_dissipated; /* 0x8C */ + u32 power_consumed; /* 0x90 */ + u32 efi_version; /* 0x94 */ + u32 reserved[42]; /* 0x98 */ }; struct nvm_cfg1_path { @@ -4652,26 +4457,8 @@ struct nvm_cfg1_path { }; struct nvm_cfg1_port { - u32 power_dissipated; /* 0x0 */ -#define NVM_CFG1_PORT_POWER_DIS_D0_MASK 0x000000FF -#define NVM_CFG1_PORT_POWER_DIS_D0_OFFSET 0 -#define NVM_CFG1_PORT_POWER_DIS_D1_MASK 0x0000FF00 -#define NVM_CFG1_PORT_POWER_DIS_D1_OFFSET 8 -#define NVM_CFG1_PORT_POWER_DIS_D2_MASK 0x00FF0000 -#define NVM_CFG1_PORT_POWER_DIS_D2_OFFSET 16 -#define NVM_CFG1_PORT_POWER_DIS_D3_MASK 0xFF000000 -#define NVM_CFG1_PORT_POWER_DIS_D3_OFFSET 24 - - u32 power_consumed; /* 0x4 */ -#define NVM_CFG1_PORT_POWER_CONS_D0_MASK 0x000000FF -#define NVM_CFG1_PORT_POWER_CONS_D0_OFFSET 0 -#define NVM_CFG1_PORT_POWER_CONS_D1_MASK 0x0000FF00 -#define NVM_CFG1_PORT_POWER_CONS_D1_OFFSET 8 -#define NVM_CFG1_PORT_POWER_CONS_D2_MASK 0x00FF0000 -#define NVM_CFG1_PORT_POWER_CONS_D2_OFFSET 16 -#define NVM_CFG1_PORT_POWER_CONS_D3_MASK 0xFF000000 -#define NVM_CFG1_PORT_POWER_CONS_D3_OFFSET 24 - + u32 reserved__m_relocated_to_option_123; /* 0x0 */ + u32 reserved__m_relocated_to_option_124; /* 0x4 */ u32 generic_cont0; /* 0x8 */ #define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF #define NVM_CFG1_PORT_LED_MODE_OFFSET 0 @@ -4699,7 +4486,9 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 #define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 #define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 - +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 u32 pcie_cfg; /* 0xC */ #define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 #define NVM_CFG1_PORT_RESERVED15_OFFSET 0 @@ -4784,10 +4573,11 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB #define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0xD -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0xE -#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0xF -#define 
NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x10 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 #define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 #define NVM_CFG1_PORT_AN_MODE_OFFSET 24 #define NVM_CFG1_PORT_AN_MODE_NONE 0x0 @@ -4801,9 +4591,6 @@ struct nvm_cfg1_port { u32 mgmt_traffic; /* 0x20 */ #define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F #define NVM_CFG1_PORT_RESERVED61_OFFSET 0 -#define NVM_CFG1_PORT_RESERVED61_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_RMII 0x1 -#define NVM_CFG1_PORT_RESERVED61_NCSI_OVER_SMBUS 0x2 u32 ext_phy; /* 0x24 */ #define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF @@ -4814,16 +4601,12 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 u32 mba_cfg1; /* 0x28 */ -#define NVM_CFG1_PORT_MBA_MASK 0x00000001 -#define NVM_CFG1_PORT_MBA_OFFSET 0 -#define NVM_CFG1_PORT_MBA_DISABLED 0x0 -#define NVM_CFG1_PORT_MBA_ENABLED 0x1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_MASK 0x00000006 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_OFFSET 1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_AUTO 0x0 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_BBS 0x1 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT18H 0x2 -#define NVM_CFG1_PORT_MBA_BOOT_TYPE_INT19H 0x3 +#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 +#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 #define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 #define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 #define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 @@ -4836,61 +4619,30 @@ struct nvm_cfg1_port { #define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 #define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 #define NVM_CFG1_PORT_RESERVED5_OFFSET 9 -#define NVM_CFG1_PORT_RESERVED5_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED5_2K 0x1 -#define NVM_CFG1_PORT_RESERVED5_4K 0x2 -#define NVM_CFG1_PORT_RESERVED5_8K 0x3 -#define NVM_CFG1_PORT_RESERVED5_16K 0x4 -#define NVM_CFG1_PORT_RESERVED5_32K 0x5 -#define NVM_CFG1_PORT_RESERVED5_64K 0x6 -#define NVM_CFG1_PORT_RESERVED5_128K 0x7 -#define NVM_CFG1_PORT_RESERVED5_256K 0x8 -#define NVM_CFG1_PORT_RESERVED5_512K 0x9 -#define NVM_CFG1_PORT_RESERVED5_1M 0xA -#define NVM_CFG1_PORT_RESERVED5_2M 0xB -#define NVM_CFG1_PORT_RESERVED5_4M 0xC -#define NVM_CFG1_PORT_RESERVED5_8M 0xD -#define NVM_CFG1_PORT_RESERVED5_16M 0xE -#define NVM_CFG1_PORT_RESERVED5_32M 0xF -#define NVM_CFG1_PORT_MBA_LINK_SPEED_MASK 0x001E0000 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_OFFSET 17 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_AUTONEG 0x0 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_1G 0x1 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_10G 0x2 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_25G 0x4 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_40G 0x5 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_50G 0x6 -#define NVM_CFG1_PORT_MBA_LINK_SPEED_100G 0x7 -#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 -#define NVM_CFG1_PORT_MBA_BOOT_RETRY_COUNT_OFFSET 21 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 +#define 
NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 u32 mba_cfg2; /* 0x2C */ -#define NVM_CFG1_PORT_MBA_VLAN_VALUE_MASK 0x0000FFFF -#define NVM_CFG1_PORT_MBA_VLAN_VALUE_OFFSET 0 -#define NVM_CFG1_PORT_MBA_VLAN_MASK 0x00010000 -#define NVM_CFG1_PORT_MBA_VLAN_OFFSET 16 +#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF +#define NVM_CFG1_PORT_RESERVED65_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 +#define NVM_CFG1_PORT_RESERVED66_OFFSET 16 u32 vf_cfg; /* 0x30 */ #define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF #define NVM_CFG1_PORT_RESERVED8_OFFSET 0 #define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000 #define NVM_CFG1_PORT_RESERVED6_OFFSET 16 -#define NVM_CFG1_PORT_RESERVED6_DISABLED 0x0 -#define NVM_CFG1_PORT_RESERVED6_4K 0x1 -#define NVM_CFG1_PORT_RESERVED6_8K 0x2 -#define NVM_CFG1_PORT_RESERVED6_16K 0x3 -#define NVM_CFG1_PORT_RESERVED6_32K 0x4 -#define NVM_CFG1_PORT_RESERVED6_64K 0x5 -#define NVM_CFG1_PORT_RESERVED6_128K 0x6 -#define NVM_CFG1_PORT_RESERVED6_256K 0x7 -#define NVM_CFG1_PORT_RESERVED6_512K 0x8 -#define NVM_CFG1_PORT_RESERVED6_1M 0x9 -#define NVM_CFG1_PORT_RESERVED6_2M 0xA -#define NVM_CFG1_PORT_RESERVED6_4M 0xB -#define NVM_CFG1_PORT_RESERVED6_8M 0xC -#define NVM_CFG1_PORT_RESERVED6_16M 0xD -#define NVM_CFG1_PORT_RESERVED6_32M 0xE -#define NVM_CFG1_PORT_RESERVED6_64M 0xF struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ @@ -4973,18 +4725,16 @@ struct nvm_cfg1_func { u32 device_id; /* 0x10 */ #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF #define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 -#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_MASK 0xFFFF0000 -#define NVM_CFG1_FUNC_VENDOR_DEVICE_ID_OFFSET 16 +#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 u32 cmn_cfg; /* 0x14 */ -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_MASK 0x00000007 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_OFFSET 0 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_PXE 0x0 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_RPL 0x1 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_BOOTP 0x2 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_ISCSI_BOOT 0x3 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_FCOE_BOOT 0x4 -#define NVM_CFG1_FUNC_MBA_BOOT_PROTOCOL_NONE 0x7 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 #define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 #define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 @@ -5029,8 +4779,8 @@ struct nvm_cfg1_func { struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */ - - u32 reserved[9]; /* 0x2C */ + u32 preboot_generic_cfg; /* 0x2C */ + u32 reserved[8]; /* 0x30 */ }; struct nvm_cfg1 { diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c index 0b21a553cc7d..f55ebdc3c832 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c @@ -513,17 +513,14 @@ static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, * Return -1 on error. */ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, - u8 start_vport, u8 num_vports, struct init_qm_vport_params *vport_params) { - u8 tc, i, vport_id; u32 inc_val; + u8 tc, i; /* go over all PF VPORTs */ - for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { - u32 temp = QM_REG_WFQVPUPPERBOUND_RT_OFFSET; - u16 *pq_ids = &vport_params[i].first_tx_pq_id[0]; + for (i = 0; i < num_vports; i++) { if (!vport_params[i].vport_wfq) continue; @@ -539,20 +536,16 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn, * different TCs */ for (tc = 0; tc < NUM_OF_TCS; tc++) { - u16 vport_pq_id = pq_ids[tc]; + u16 vport_pq_id = vport_params[i].first_tx_pq_id[tc]; if (vport_pq_id != QM_INVALID_PQ_ID) { - STORE_RT_REG(p_hwfn, - QM_REG_WFQVPWEIGHT_RT_OFFSET + - vport_pq_id, inc_val); - STORE_RT_REG(p_hwfn, temp + vport_pq_id, - QM_WFQ_UPPER_BOUND | - QM_WFQ_CRD_REG_SIGN_BIT); STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET + vport_pq_id, - QM_WFQ_INIT_CRD(inc_val) | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); } } } @@ -709,8 +702,7 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, if (qed_pf_rl_rt_init(p_hwfn, p_params->pf_id, p_params->pf_rl)) return -1; - if (qed_vp_wfq_rt_init(p_hwfn, p_params->start_vport, - p_params->num_vports, vport_params)) + if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params)) return -1; if (qed_vport_rl_rt_init(p_hwfn, p_params->start_vport, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c index 796f1390e598..3269b3610e03 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c @@ -55,63 +55,98 @@ void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn) int i; for (i = 0; i < RUNTIME_ARRAY_SIZE; i++) - p_hwfn->rt_data[i].b_valid = false; + p_hwfn->rt_data.b_valid[i] = false; } void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val) { - p_hwfn->rt_data[rt_offset].init_val = val; - p_hwfn->rt_data[rt_offset].b_valid = true; + p_hwfn->rt_data.init_val[rt_offset] = val; + p_hwfn->rt_data.b_valid[rt_offset] = true; } void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, - u32 rt_offset, - u32 *val, + u32 rt_offset, u32 *p_val, size_t size) { size_t i; for (i = 0; i < size / sizeof(u32); i++) { - p_hwfn->rt_data[rt_offset + i].init_val = val[i]; - p_hwfn->rt_data[rt_offset + i].b_valid = true; + p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; + p_hwfn->rt_data.b_valid[rt_offset + i] = true; } } -static void qed_init_rt(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 addr, - u32 rt_offset, - u32 size) +static int qed_init_rt(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 addr, + u16 rt_offset, + u16 size, + bool b_must_dmae) { - struct qed_rt_data *rt_data = p_hwfn->rt_data + rt_offset; - u32 i; + u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; + bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; + u16 i, segment; + int rc = 0; + /* Since not all RT entries are initialized, go over the RT and + * for each segment of initialized values use DMA. 
+ */ for (i = 0; i < size; i++) { - if (!rt_data[i].b_valid) + if (!p_valid[i]) continue; - qed_wr(p_hwfn, p_ptt, addr + (i << 2), rt_data[i].init_val); + + /* In case there isn't any wide-bus configuration here, + * simply write the data instead of using dmae. + */ + if (!b_must_dmae) { + qed_wr(p_hwfn, p_ptt, addr + (i << 2), + p_init_val[i]); + continue; + } + + /* Start of a new segment */ + for (segment = 1; i + segment < size; segment++) + if (!p_valid[i + segment]) + break; + + rc = qed_dmae_host2grc(p_hwfn, p_ptt, + (uintptr_t)(p_init_val + i), + addr + (i << 2), segment, 0); + if (rc != 0) + return rc; + + /* Jump over the entire segment, including invalid entry */ + i += segment; } + + return rc; } int qed_init_alloc(struct qed_hwfn *p_hwfn) { - struct qed_rt_data *rt_data; + struct qed_rt_data *rt_data = &p_hwfn->rt_data; - rt_data = kzalloc(sizeof(*rt_data) * RUNTIME_ARRAY_SIZE, GFP_ATOMIC); - if (!rt_data) + rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE, + GFP_KERNEL); + if (!rt_data->b_valid) return -ENOMEM; - p_hwfn->rt_data = rt_data; + rt_data->init_val = kzalloc(sizeof(u32) * RUNTIME_ARRAY_SIZE, + GFP_KERNEL); + if (!rt_data->init_val) { + kfree(rt_data->b_valid); + return -ENOMEM; + } return 0; } void qed_init_free(struct qed_hwfn *p_hwfn) { - kfree(p_hwfn->rt_data); - p_hwfn->rt_data = NULL; + kfree(p_hwfn->rt_data.init_val); + kfree(p_hwfn->rt_data.b_valid); } static int qed_init_array_dmae(struct qed_hwfn *p_hwfn, @@ -289,7 +324,8 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn, case INIT_SRC_RUNTIME: qed_init_rt(p_hwfn, p_ptt, addr, le16_to_cpu(arg->runtime.offset), - le16_to_cpu(arg->runtime.size)); + le16_to_cpu(arg->runtime.size), + b_must_dmae); break; } @@ -316,49 +352,50 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct init_read_op *cmd) { - u32 data = le32_to_cpu(cmd->op_data); - u32 addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + bool (*comp_check)(u32 val, u32 expected_val); + u32 delay = QED_INIT_POLL_PERIOD_US, val; + u32 data, addr, poll; + int i; + + data = le32_to_cpu(cmd->op_data); + addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); - bool (*comp_check)(u32 val, - u32 expected_val); - u32 delay = QED_INIT_POLL_PERIOD_US, val; val = qed_rd(p_hwfn, p_ptt, addr); - data = le32_to_cpu(cmd->op_data); - if (GET_FIELD(data, INIT_READ_OP_POLL)) { - int i; + if (poll == INIT_POLL_NONE) + return; - switch (GET_FIELD(data, INIT_READ_OP_POLL_COMP)) { - case INIT_COMPARISON_EQ: - comp_check = comp_eq; - break; - case INIT_COMPARISON_OR: - comp_check = comp_or; - break; - case INIT_COMPARISON_AND: - comp_check = comp_and; - break; - default: - comp_check = NULL; - DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", - data); - return; - } + switch (poll) { + case INIT_POLL_EQ: + comp_check = comp_eq; + break; + case INIT_POLL_OR: + comp_check = comp_or; + break; + case INIT_POLL_AND: + comp_check = comp_and; + break; + default: + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + cmd->op_data); + return; + } - for (i = 0; - i < QED_INIT_MAX_POLL_COUNT && - !comp_check(val, le32_to_cpu(cmd->expected_val)); - i++) { - udelay(delay); - val = qed_rd(p_hwfn, p_ptt, addr); - } + data = le32_to_cpu(cmd->expected_val); + for (i = 0; + i < QED_INIT_MAX_POLL_COUNT && !comp_check(val, data); + i++) { + udelay(delay); + val = qed_rd(p_hwfn, p_ptt, addr); + } - if (i == QED_INIT_MAX_POLL_COUNT) - DP_ERR(p_hwfn, - "Timeout when polling reg: 0x%08x [ Waiting-for: 
%08x Got: %08x (comparsion %08x)]\n", - addr, le32_to_cpu(cmd->expected_val), - val, data); + if (i == QED_INIT_MAX_POLL_COUNT) { + DP_ERR(p_hwfn, + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparsion %08x)]\n", + addr, le32_to_cpu(cmd->expected_val), + val, le32_to_cpu(cmd->op_data)); } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index f72036a2ef5b..978d07a61bbf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -714,7 +714,6 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->sb_id = cpu_to_le16(p_params->sb); p_ramrod->sb_index = p_params->sb_idx; p_ramrod->stats_counter_id = stats_id; - p_ramrod->tc = p_pq_params->eth.tc; p_ramrod->pbl_size = cpu_to_le16(pbl_size); p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); @@ -821,9 +820,8 @@ qed_filter_action(enum qed_filter_opcode opcode) case QED_FILTER_REMOVE: action = ETH_FILTER_ACTION_REMOVE; break; - case QED_FILTER_REPLACE: case QED_FILTER_FLUSH: - action = ETH_FILTER_ACTION_REPLACE; + action = ETH_FILTER_ACTION_REMOVE_ALL; break; default: action = MAX_ETH_FILTER_ACTION; @@ -892,8 +890,7 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; switch (p_filter_cmd->opcode) { - case QED_FILTER_FLUSH: - p_ramrod->filter_cmd_hdr.cmd_cnt = 0; break; + case QED_FILTER_REPLACE: case QED_FILTER_MOVE: p_ramrod->filter_cmd_hdr.cmd_cnt = 2; break; default: @@ -962,6 +959,12 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, p_second_filter->action = ETH_FILTER_ACTION_ADD; p_second_filter->vport_id = vport_to_add_to; + } else if (p_filter_cmd->opcode == QED_FILTER_REPLACE) { + p_first_filter->vport_id = vport_to_add_to; + memcpy(p_second_filter, p_first_filter, + sizeof(*p_second_filter)); + p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; + p_second_filter->action = ETH_FILTER_ACTION_ADD; } else { action = qed_filter_action(p_filter_cmd->opcode); diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9d76ce249277..593f8871adb6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -190,7 +190,7 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->pci_mem_start = cdev->pci_params.mem_start; dev_info->pci_mem_end = cdev->pci_params.mem_end; dev_info->pci_irq = cdev->pci_params.irq; - dev_info->is_mf = IS_MF(&cdev->hwfns[0]); + dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]); ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr); dev_info->fw_major = FW_MAJOR_VERSION; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index ba1b1f1ef789..1457e30faccf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -720,26 +720,25 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, return -EINVAL; } - if (p_hwfn->cdev->mf_mode != SF) { - info->bandwidth_min = (shmem_info.config & - FUNC_MF_CFG_MIN_BW_MASK) >> - FUNC_MF_CFG_MIN_BW_SHIFT; - if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { - DP_INFO(p_hwfn, - "bandwidth minimum out of bounds [%02x]. 
Set to 1\n", - info->bandwidth_min); - info->bandwidth_min = 1; - } - info->bandwidth_max = (shmem_info.config & - FUNC_MF_CFG_MAX_BW_MASK) >> - FUNC_MF_CFG_MAX_BW_SHIFT; - if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { - DP_INFO(p_hwfn, - "bandwidth maximum out of bounds [%02x]. Set to 100\n", - info->bandwidth_max); - info->bandwidth_max = 100; - } + info->bandwidth_min = (shmem_info.config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (info->bandwidth_min < 1 || info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + info->bandwidth_min); + info->bandwidth_min = 1; + } + + info->bandwidth_max = (shmem_info.config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (info->bandwidth_max < 1 || info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. Set to 100\n", + info->bandwidth_max); + info->bandwidth_max = 100; } if (shmem_info.mac_upper || shmem_info.mac_lower) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 287fadfab52d..8a83609c443c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -343,7 +343,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum mf_mode mode); + enum qed_mf_mode mode); /** * @brief qed_sp_pf_stop - PF Function Stop Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 6f7879136633..33090f63548c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -90,7 +90,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, } int qed_sp_pf_start(struct qed_hwfn *p_hwfn, - enum mf_mode mode) + enum qed_mf_mode mode) { struct qed_sp_init_request_params params; struct pf_start_ramrod_data *p_ramrod = NULL; @@ -125,6 +125,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->dont_log_ramrods = 0; p_ramrod->log_type_mask = cpu_to_le16(0xf); p_ramrod->mf_mode = mode; + switch (mode) { + case QED_MF_DEFAULT: + case QED_MF_NPAR: + p_ramrod->mf_mode = MF_NPAR; + break; + case QED_MF_OVLAN: + p_ramrod->mf_mode = MF_OVLAN; + break; + default: + DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n"); + p_ramrod->mf_mode = MF_NPAR; + } p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; /* Place EQ address in RAMROD */ @@ -142,9 +154,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_hwfn->hw_info.personality = PERSONALITY_ETH; DP_VERBOSE(p_hwfn, QED_MSG_SPQ, - "Setting event_ring_sb [id %04x index %02x], mf [%s] outer_tag [%d]\n", + "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", sb, sb_index, - (p_ramrod->mf_mode == SF) ? "SF" : "Multi-Pf", p_ramrod->outer_tag); return qed_spq_post(p_hwfn, p_ent, NULL); diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 7c6caf7f6612..f75d9e0676ce 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -173,9 +173,9 @@ enum QEDE_STATE { * skb are built only after the frame was DMA-ed. 
*/ struct sw_rx_data { - u8 *data; - - DEFINE_DMA_UNMAP_ADDR(mapping); + struct page *data; + dma_addr_t mapping; + unsigned int page_offset; }; struct qede_rx_queue { @@ -188,6 +188,7 @@ struct qede_rx_queue { void __iomem *hw_rxq_prod_addr; int rx_buf_size; + unsigned int rx_buf_seg_size; u16 num_rx_buffers; u16 rxq_id; @@ -281,6 +282,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev); #define NUM_TX_BDS_MIN 128 #define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define QEDE_RX_HDR_SIZE 256 #define for_each_rss(i) for (i = 0; i < edev->num_rss; i++) #endif /* _QEDE_H_ */ diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index e442b85c9a5e..c49dc10ce151 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -217,9 +217,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) struct qed_link_params params; u32 speed; - if (edev->dev_info.common.is_mf) { + if (!edev->dev_info.common.is_mf_default) { DP_INFO(edev, - "Link parameters can not be changed in MF mode\n"); + "Link parameters can not be changed in non-default mode\n"); return -EOPNOTSUPP; } @@ -428,7 +428,7 @@ static int qede_set_pauseparam(struct net_device *dev, struct qed_link_params params; struct qed_link_output current_link; - if (!edev->dev_info.common.is_mf) { + if (!edev->dev_info.common.is_mf_default) { DP_INFO(edev, "Pause parameters can not be updated in non-default mode\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6237f10b5119..f50e0bd7fb2c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -330,15 +330,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, struct eth_tx_3rd_bd *third_bd) { u8 l4_proto; - u16 bd2_bits = 0, bd2_bits2 = 0; + u16 bd2_bits1 = 0, bd2_bits2 = 0; - bd2_bits2 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); + bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT); - bd2_bits |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & + bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT; - bd2_bits2 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << + bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH << ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT); if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) @@ -347,16 +347,15 @@ static void qede_set_params_for_ipv6_ext(struct sk_buff *skb, l4_proto = ip_hdr(skb)->protocol; if (l4_proto == IPPROTO_UDP) - bd2_bits2 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; + bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT; - if (third_bd) { + if (third_bd) third_bd->data.bitfields |= - ((tcp_hdrlen(skb) / 4) & - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << - ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT; - } + cpu_to_le16(((tcp_hdrlen(skb) / 4) & + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) << + ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT); - second_bd->data.bitfields = cpu_to_le16(bd2_bits); + second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); } @@ -464,12 +463,16 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* Fill the parsing flags & params according to the requested offload */ if (xmit_type & XMIT_L4_CSUM) { + u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT; + /* We don't re-calculate IP checksum as it is 
already done by * the upper stack */ first_bd->data.bd_flags.bitfields |= 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + first_bd->data.bitfields |= cpu_to_le16(temp); + /* If the packet is IPv6 with extension header, indicate that * to FW and pass few params, since the device cracker doesn't * support parsing IPv6 with extension header/s. @@ -491,7 +494,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, /* @@@TBD - if will not be removed need to check */ third_bd->data.bitfields |= - (1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); + cpu_to_le16((1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT)); /* Make life easier for FW guys who can't deal with header and * data on same BD. If we need to split, use the second bd... @@ -719,26 +722,52 @@ static bool qede_has_tx_work(struct qede_fastpath *fp) return false; } -/* This function copies the Rx buffer from the CONS position to the PROD - * position, since we failed to allocate a new Rx buffer. +/* This function reuses the buffer(from an offset) from + * consumer index to producer index in the bd ring */ -static void qede_reuse_rx_data(struct qede_rx_queue *rxq) +static inline void qede_reuse_page(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) { - struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); - struct sw_rx_data *sw_rx_data_cons = - &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; - struct sw_rx_data *sw_rx_data_prod = - &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + struct sw_rx_data *curr_prod; + dma_addr_t new_mapping; - dma_unmap_addr_set(sw_rx_data_prod, mapping, - dma_unmap_addr(sw_rx_data_cons, mapping)); + curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + *curr_prod = *curr_cons; - sw_rx_data_prod->data = sw_rx_data_cons->data; - memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); + new_mapping = curr_prod->mapping + curr_prod->page_offset; + + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping)); - rxq->sw_rx_cons++; rxq->sw_rx_prod++; + curr_cons->data = NULL; +} + +static inline int qede_realloc_rx_buffer(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct sw_rx_data *curr_cons) +{ + /* Move to the next segment in the page */ + curr_cons->page_offset += rxq->rx_buf_seg_size; + + if (curr_cons->page_offset == PAGE_SIZE) { + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + return -ENOMEM; + + dma_unmap_page(&edev->pdev->dev, curr_cons->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + } else { + /* Increment refcount of the page as we don't want + * network stack to take the ownership of the page + * which can be recycled multiple times by the driver. 
+ */ + atomic_inc(&curr_cons->data->_count); + qede_reuse_page(edev, rxq, curr_cons); + } + + return 0; } static inline void qede_update_rx_prod(struct qede_dev *edev, @@ -857,9 +886,10 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) struct sw_rx_data *sw_rx_data; union eth_rx_cqe *cqe; struct sk_buff *skb; + struct page *data; + __le16 flags; u16 len, pad; u32 rx_hash; - u8 *data; /* Get the CQE from the completion ring */ cqe = (union eth_rx_cqe *) @@ -879,56 +909,110 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) data = sw_rx_data->data; fp_cqe = &cqe->fast_path_regular; - len = le16_to_cpu(fp_cqe->pkt_len); + len = le16_to_cpu(fp_cqe->len_on_first_bd); pad = fp_cqe->placement_offset; + flags = cqe->fast_path_regular.pars_flags.flags; - /* For every Rx BD consumed, we allocate a new BD so the BD ring - * is always with a fixed size. If allocation fails, we take the - * consumed BD and return it to the ring in the PROD position. - * The packet that was received on that BD will be dropped (and - * not passed to the upper stack). - */ - if (likely(qede_alloc_rx_buffer(edev, rxq) == 0)) { - dma_unmap_single(&edev->pdev->dev, - dma_unmap_addr(sw_rx_data, mapping), - rxq->rx_buf_size, DMA_FROM_DEVICE); - - /* If this is an error packet then drop it */ - parse_flag = - le16_to_cpu(cqe->fast_path_regular.pars_flags.flags); - csum_flag = qede_check_csum(parse_flag); - if (csum_flag == QEDE_CSUM_ERROR) { - DP_NOTICE(edev, - "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", - sw_comp_cons, parse_flag); - rxq->rx_hw_errors++; - kfree(data); - goto next_rx; - } - - skb = build_skb(data, 0); - - if (unlikely(!skb)) { - DP_NOTICE(edev, - "Build_skb failed, dropping incoming packet\n"); - kfree(data); - rxq->rx_alloc_errors++; - goto next_rx; - } + /* If this is an error packet then drop it */ + parse_flag = le16_to_cpu(flags); - skb_reserve(skb, pad); + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + DP_NOTICE(edev, + "CQE in CONS = %u has error, flags = %x, dropping incoming packet\n", + sw_comp_cons, parse_flag); + rxq->rx_hw_errors++; + qede_reuse_page(edev, rxq, sw_rx_data); + goto next_rx; + } - } else { + skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); + if (unlikely(!skb)) { DP_NOTICE(edev, - "New buffer allocation failed, dropping incoming packet and reusing its buffer\n"); - qede_reuse_rx_data(rxq); + "Build_skb failed, dropping incoming packet\n"); + qede_reuse_page(edev, rxq, sw_rx_data); rxq->rx_alloc_errors++; - goto next_cqe; + goto next_rx; } - sw_rx_data->data = NULL; + /* Copy data into SKB */ + if (len + pad <= QEDE_RX_HDR_SIZE) { + memcpy(skb_put(skb, len), + page_address(data) + pad + + sw_rx_data->page_offset, len); + qede_reuse_page(edev, rxq, sw_rx_data); + } else { + struct skb_frag_struct *frag; + unsigned int pull_len; + unsigned char *va; - skb_put(skb, len); + frag = &skb_shinfo(skb)->frags[0]; + + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, data, + pad + sw_rx_data->page_offset, + len, rxq->rx_buf_seg_size); + + va = skb_frag_address(frag); + pull_len = eth_get_headlen(va, QEDE_RX_HDR_SIZE); + + /* Align the pull_len to optimize memcpy */ + memcpy(skb->data, va, ALIGN(pull_len, sizeof(long))); + + skb_frag_size_sub(frag, pull_len); + frag->page_offset += pull_len; + skb->data_len -= pull_len; + skb->tail += pull_len; + + if (unlikely(qede_realloc_rx_buffer(edev, rxq, + sw_rx_data))) { + DP_ERR(edev, "Failed to allocate rx buffer\n"); + rxq->rx_alloc_errors++; + goto 
next_cqe; + } + } + + if (fp_cqe->bd_num != 1) { + u16 pkt_len = le16_to_cpu(fp_cqe->pkt_len); + u8 num_frags; + + pkt_len -= len; + + for (num_frags = fp_cqe->bd_num - 1; num_frags > 0; + num_frags--) { + u16 cur_size = pkt_len > rxq->rx_buf_size ? + rxq->rx_buf_size : pkt_len; + + WARN_ONCE(!cur_size, + "Still got %d BDs for mapping jumbo, but length became 0\n", + num_frags); + + if (unlikely(qede_alloc_rx_buffer(edev, rxq))) + goto next_cqe; + + rxq->sw_rx_cons++; + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; + sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; + qed_chain_consume(&rxq->rx_bd_ring); + dma_unmap_page(&edev->pdev->dev, + sw_rx_data->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); + + skb_fill_page_desc(skb, + skb_shinfo(skb)->nr_frags++, + sw_rx_data->data, 0, + cur_size); + + skb->truesize += PAGE_SIZE; + skb->data_len += cur_size; + skb->len += cur_size; + pkt_len -= cur_size; + } + + if (pkt_len) + DP_ERR(edev, + "Mapped all BDs of jumbo, but still have %d bytes\n", + pkt_len); + } skb->protocol = eth_type_trans(skb, edev->ndev); @@ -1566,17 +1650,17 @@ static void qede_free_rx_buffers(struct qede_dev *edev, for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) { struct sw_rx_data *rx_buf; - u8 *data; + struct page *data; rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX]; data = rx_buf->data; - dma_unmap_single(&edev->pdev->dev, - dma_unmap_addr(rx_buf, mapping), - rxq->rx_buf_size, DMA_FROM_DEVICE); + dma_unmap_page(&edev->pdev->dev, + rx_buf->mapping, + PAGE_SIZE, DMA_FROM_DEVICE); rx_buf->data = NULL; - kfree(data); + __free_page(data); } } @@ -1600,29 +1684,32 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, struct sw_rx_data *sw_rx_data; struct eth_rx_bd *rx_bd; dma_addr_t mapping; + struct page *data; u16 rx_buf_size; - u8 *data; rx_buf_size = rxq->rx_buf_size; - data = kmalloc(rx_buf_size, GFP_ATOMIC); + data = alloc_pages(GFP_ATOMIC, 0); if (unlikely(!data)) { - DP_NOTICE(edev, "Failed to allocate Rx data\n"); + DP_NOTICE(edev, "Failed to allocate Rx data [page]\n"); return -ENOMEM; } - mapping = dma_map_single(&edev->pdev->dev, data, - rx_buf_size, DMA_FROM_DEVICE); + /* Map the entire page as it would be used + * for multiple RX buffer segment size mapping. 
+ */ + mapping = dma_map_page(&edev->pdev->dev, data, 0, + PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { - kfree(data); + __free_page(data); DP_NOTICE(edev, "Failed to map Rx buffer\n"); return -ENOMEM; } sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + sw_rx_data->page_offset = 0; sw_rx_data->data = data; - - dma_unmap_addr_set(sw_rx_data, mapping, mapping); + sw_rx_data->mapping = mapping; /* Advance PROD and get BD pointer */ rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); @@ -1643,13 +1730,16 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rxq->num_rx_buffers = edev->q_num_rx_buffers; - rxq->rx_buf_size = NET_IP_ALIGN + - ETH_OVERHEAD + - edev->ndev->mtu + - QEDE_FW_RX_ALIGN_END; + rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + + edev->ndev->mtu; + if (rxq->rx_buf_size > PAGE_SIZE) + rxq->rx_buf_size = PAGE_SIZE; + + /* Segment size to spilt a page in multiple equal parts */ + rxq->rx_buf_seg_size = roundup_pow_of_two(rxq->rx_buf_size); /* Allocate the parallel driver ring for Rx buffers */ - size = sizeof(*rxq->sw_rx_ring) * NUM_RX_BDS_MAX; + size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE; rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL); if (!rxq->sw_rx_ring) { DP_ERR(edev, "Rx buffers ring allocation failed\n"); @@ -1660,7 +1750,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME_PRODUCE, QED_CHAIN_MODE_NEXT_PTR, - NUM_RX_BDS_MAX, + RX_RING_SIZE, sizeof(struct eth_rx_bd), &rxq->rx_bd_ring); @@ -1671,7 +1761,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, rc = edev->ops->common->chain_alloc(edev->cdev, QED_CHAIN_USE_TO_CONSUME, QED_CHAIN_MODE_PBL, - NUM_RX_BDS_MAX, + RX_RING_SIZE, sizeof(union eth_rx_cqe), &rxq->rx_comp_ring); if (rc) diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 1d1ba2c5ee7a..53ecb37ae563 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h @@ -11,9 +11,11 @@ #define CORE_SPQE_PAGE_SIZE_BYTES 4096 +#define X_FINAL_CLEANUP_AGG_INT 1 + #define FW_MAJOR_VERSION 8 -#define FW_MINOR_VERSION 4 -#define FW_REVISION_VERSION 2 +#define FW_MINOR_VERSION 7 +#define FW_REVISION_VERSION 3 #define FW_ENGINEERING_VERSION 0 /***********************/ @@ -152,6 +154,9 @@ /* number of queues in a PF queue group */ #define QM_PF_QUEUE_GROUP_SIZE 8 +/* the size of a single queue element in bytes */ +#define QM_PQ_ELEMENT_SIZE 4 + /* base number of Tx PQs in the CM PQ representation. 
* should be used when storing PQ IDs in CM PQ registers and context */ @@ -285,6 +290,16 @@ #define PXP_NUM_ILT_RECORDS_K2 11000 #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) +#define SDM_COMP_TYPE_NONE 0 +#define SDM_COMP_TYPE_WAKE_THREAD 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define SDM_COMP_TYPE_CM 3 +#define SDM_COMP_TYPE_LOADER 4 +#define SDM_COMP_TYPE_PXP 5 +#define SDM_COMP_TYPE_INDICATE_ERROR 6 +#define SDM_COMP_TYPE_RELEASE_THREAD 7 +#define SDM_COMP_TYPE_RAM 8 + /******************/ /* PBF CONSTANTS */ /******************/ @@ -335,7 +350,7 @@ struct event_ring_entry { /* Multi function mode */ enum mf_mode { - SF, + ERROR_MODE /* Unsupported mode */, MF_OVLAN, MF_NPAR, MAX_MF_MODE @@ -606,4 +621,19 @@ struct status_block { #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 }; +struct tunnel_parsing_flags { + u8 flags; +#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; #endif /* __COMMON_HSI__ */ diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index 320b3373ac1d..092cb0c1afcb 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h @@ -17,10 +17,8 @@ #define ETH_MAX_RAMROD_PER_CON 8 #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 #define ETH_RX_BD_PAGE_SIZE_BYTES 4096 -#define ETH_RX_SGE_PAGE_SIZE_BYTES 4096 #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 #define ETH_RX_NUM_NEXT_PAGE_BDS 2 -#define ETH_RX_NUM_NEXT_PAGE_SGES 2 #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 @@ -34,7 +32,8 @@ #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS -#define ETH_REG_CQE_PBL_SIZE 3 +/* Maximum number of buffers, used for RX packet placement */ +#define ETH_RX_MAX_BUFF_PER_PKT 5 /* num of MAC/VLAN filters */ #define ETH_NUM_MAC_FILTERS 512 @@ -54,9 +53,9 @@ /* TPA constants */ #define ETH_TPA_MAX_AGGS_NUM 64 -#define ETH_TPA_CQE_START_SGL_SIZE 3 -#define ETH_TPA_CQE_CONT_SGL_SIZE 6 -#define ETH_TPA_CQE_END_SGL_SIZE 4 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 /* Queue Zone sizes */ #define TSTORM_QZONE_SIZE 0 @@ -74,18 +73,18 @@ struct coalescing_timeset { struct eth_tx_1st_bd_flags { u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 #define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 -#define 
ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 4 -#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 -#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 @@ -97,38 +96,44 @@ struct eth_tx_data_1st_bd { __le16 vlan; u8 nbds; struct eth_tx_1st_bd_flags bd_flags; - __le16 fw_use_only; + __le16 bitfields; +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2 }; /* The parsing information data for the second tx bd of a given packet. */ struct eth_tx_data_2nd_bd { __le16 tunn_ip_size; - __le16 bitfields; -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF -#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 -#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 -#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 - __le16 bitfields2; + __le16 bitfields1; #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 -#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 10 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 14 -#define ETH_TX_DATA_2ND_BD_RESERVED1_MASK 0x1 -#define ETH_TX_DATA_2ND_BD_RESERVED1_SHIFT 15 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 }; /* Regular ETH Rx FP CQE. 
*/ @@ -145,11 +150,68 @@ struct eth_fast_path_rx_reg_cqe { struct parsing_and_err_flags pars_flags; __le16 vlan_tag; __le32 rss_hash; - __le16 len_on_bd; + __le16 len_on_first_bd; u8 placement_offset; - u8 reserved; - __le16 pbl[ETH_REG_CQE_PBL_SIZE]; - u8 reserved1[10]; + struct tunnel_parsing_flags tunnel_pars_flags; + u8 bd_num; + u8 reserved[7]; + u32 fw_debug; + u8 reserved1[3]; + u8 flags; +#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2 +}; + +/* TPA-continue ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_cont_cqe { + u8 type; + u8 tpa_agg_index; + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; + u8 reserved[5]; + u8 reserved1; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE]; +}; + +/* TPA-end ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_end_cqe { + u8 type; + u8 tpa_agg_index; + __le16 total_packet_len; + u8 num_of_bds; + u8 end_reason; + __le16 num_of_coalesced_segs; + __le32 ts_delta; + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE]; + u8 reserved1[3]; + u8 reserved2; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE]; +}; + +/* TPA-start ETH Rx FP CQE. */ +struct eth_fast_path_rx_tpa_start_cqe { + u8 type; + u8 bitfields; +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 + __le16 seg_len; + struct parsing_and_err_flags pars_flags; + __le16 vlan_tag; + __le32 rss_hash; + __le16 len_on_first_bd; + u8 placement_offset; + struct tunnel_parsing_flags tunnel_pars_flags; + u8 tpa_agg_index; + u8 header_len; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; + u32 fw_debug; }; /* The L4 pseudo checksum mode for Ethernet */ @@ -168,13 +230,26 @@ struct eth_slow_path_rx_cqe { u8 type; u8 ramrod_cmd_id; u8 error_flag; - u8 reserved[27]; + u8 reserved[25]; __le16 echo; + u8 reserved1; + u8 flags; +/* for PMD mode - valid indication */ +#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0 +/* for PMD mode - valid toggle indication */ +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2 }; /* union for all ETH Rx CQE types */ union eth_rx_cqe { struct eth_fast_path_rx_reg_cqe fast_path_regular; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; struct eth_slow_path_rx_cqe slow_path; }; @@ -183,15 +258,18 @@ enum eth_rx_cqe_type { ETH_RX_CQE_TYPE_UNUSED, ETH_RX_CQE_TYPE_REGULAR, ETH_RX_CQE_TYPE_SLOW_PATH, + ETH_RX_CQE_TYPE_TPA_START, + ETH_RX_CQE_TYPE_TPA_CONT, + ETH_RX_CQE_TYPE_TPA_END, MAX_ETH_RX_CQE_TYPE }; /* ETH Rx producers data */ struct eth_rx_prod_data { __le16 bd_prod; - __le16 sge_prod; __le16 cqe_prod; __le16 reserved; + __le16 reserved1; }; /* The first tx bd of a given packet */ @@ -211,12 +289,17 @@ struct eth_tx_2nd_bd { /* The parsing information data for the third tx bd of a given packet. 
*/ struct eth_tx_data_3rd_bd { __le16 lso_mss; - u8 bitfields; + __le16 bitfields; #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 - u8 resereved0[3]; +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 + u8 tunn_l4_hdr_start_offset_w; + u8 tunn_hdr_size_w; }; /* The third tx bd of a given packet */ @@ -226,12 +309,24 @@ struct eth_tx_3rd_bd { struct eth_tx_data_3rd_bd data; }; +/* Complementary information for the regular tx bd of a given packet. */ +struct eth_tx_data_bd { + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + /* The common non-special TX BD ring element */ struct eth_tx_bd { struct regpair addr; __le16 nbytes; - __le16 reserved0; - __le32 reserved1; + struct eth_tx_data_bd data; }; union eth_tx_bd_types { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index d4a32e878180..3d43c1d4ecef 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -80,7 +80,7 @@ struct qed_dev_info { u8 num_hwfns; u8 hw_mac[ETH_ALEN]; - bool is_mf; + bool is_mf_default; /* FW version */ u16 fw_major; @@ -360,6 +360,12 @@ enum DP_MODULE { /* to be added...up to 0x8000000 */ }; +enum qed_mf_mode { + QED_MF_DEFAULT, + QED_MF_OVLAN, + QED_MF_NPAR, +}; + struct qed_eth_stats { u64 no_buff_discards; u64 packet_too_big_discard; -- cgit v1.2.3 From a4f9cdb2b44d2dceede4eaad00c6b11206af0fea Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 15 Feb 2016 21:25:11 +0000 Subject: pasemi_mac: Replace LRO with GRO GRO is simpler to use than the old inet_lro library, and is compatible with forwarding and bridging configurations. Compile-tested only. Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller --- drivers/net/ethernet/pasemi/Kconfig | 5 +-- drivers/net/ethernet/pasemi/pasemi_mac.c | 50 +----------------------- drivers/net/ethernet/pasemi/pasemi_mac.h | 4 -- drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 1 - 4 files changed, 3 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/pasemi/Kconfig b/drivers/net/ethernet/pasemi/Kconfig index db19c6f49859..7c92e8306c19 100644 --- a/drivers/net/ethernet/pasemi/Kconfig +++ b/drivers/net/ethernet/pasemi/Kconfig @@ -5,7 +5,7 @@ config NET_VENDOR_PASEMI bool "PA Semi devices" default y - depends on PPC_PASEMI && PCI && INET + depends on PPC_PASEMI && PCI ---help--- If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,9 +18,8 @@ if NET_VENDOR_PASEMI config PASEMI_MAC tristate "PA Semi 1/10Gbit MAC" - depends on PPC_PASEMI && PCI && INET + depends on PPC_PASEMI && PCI select PHYLIB - select INET_LRO ---help--- This driver supports the on-chip 1/10Gbit Ethernet controller on PA Semi's PWRficient line of chips. 
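The pasemi_mac.c hunks that follow make the substitution described in the commit message concrete: the lro_mgr setup, the get_skb_hdr() callback and the per-poll lro_flush_all() are all deleted, and each completed skb is handed straight to the NAPI/GRO core. As a rough, self-contained sketch of the resulting RX-completion pattern — the example_* names and struct layout are placeholders for illustration, not the actual pasemi code:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

struct example_mac {
	struct net_device *netdev;
	struct napi_struct napi;
};

/* Placeholder for however the hardware hands back a completed buffer. */
static struct sk_buff *example_next_completed_skb(struct example_mac *mac)
{
	/* Pull the next completed descriptor's skb, or NULL when done. */
	return NULL;
}

static int example_clean_rx(struct example_mac *mac, int budget)
{
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = example_next_completed_skb(mac);

		if (!skb)
			break;

		skb->protocol = eth_type_trans(skb, mac->netdev);
		/* No driver-side aggregation state: GRO merges TCP flows
		 * internally and the NAPI core flushes any held packets
		 * when the poll completes, so there is no lro_flush_all()
		 * equivalent to call here.
		 */
		napi_gro_receive(&mac->napi, skb);
		done++;
	}

	return done;
}

No GRO-specific setup is needed on the driver side either — the core enables GRO on registered netdevices by default — which is why the conversion below only removes code.
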
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 57a6e6cd74fc..af54df52aa6b 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -30,9 +30,7 @@ #include #include -#include #include -#include #include #include @@ -52,12 +50,9 @@ * * - Multicast support * - Large MTU support - * - SW LRO * - Multiqueue RX/TX */ -#define LRO_MAX_AGGR 64 - #define PE_MIN_MTU 64 #define PE_MAX_MTU 9000 #define PE_DEF_MTU ETH_DATA_LEN @@ -257,37 +252,6 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p) return 0; } -static int get_skb_hdr(struct sk_buff *skb, void **iphdr, - void **tcph, u64 *hdr_flags, void *data) -{ - u64 macrx = (u64) data; - unsigned int ip_len; - struct iphdr *iph; - - /* IPv4 header checksum failed */ - if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK) - return -1; - - /* non tcp packet */ - skb_reset_network_header(skb); - iph = ip_hdr(skb); - if (iph->protocol != IPPROTO_TCP) - return -1; - - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - *tcph = tcp_hdr(skb); - - /* check if ip header and tcp header are complete */ - if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb)) - return -1; - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *iphdr = iph; - - return 0; -} - static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, const int nfrags, struct sk_buff *skb, @@ -817,7 +781,7 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, skb_put(skb, len-4); skb->protocol = eth_type_trans(skb, mac->netdev); - lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx); + napi_gro_receive(&mac->napi, skb); next: RX_DESC(rx, n) = 0; @@ -839,8 +803,6 @@ next: rx_ring(mac)->next_to_clean = n; - lro_flush_all(&mac->lro_mgr); - /* Increase is in number of 16-byte entries, and since each descriptor * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with * count*2. 
@@ -1754,16 +1716,6 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_GSO; - mac->lro_mgr.max_aggr = LRO_MAX_AGGR; - mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; - mac->lro_mgr.lro_arr = mac->lro_desc; - mac->lro_mgr.get_skb_header = get_skb_hdr; - mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - mac->lro_mgr.dev = mac->netdev; - mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; - - mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL); if (!mac->dma_pdev) { dev_err(&mac->pdev->dev, "Can't find DMA Controller\n"); diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h index a5807703ab96..161c99a98403 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.h +++ b/drivers/net/ethernet/pasemi/pasemi_mac.h @@ -31,7 +31,6 @@ #define CS_RING_SIZE (TX_RING_SIZE*2) -#define MAX_LRO_DESCRIPTORS 8 #define MAX_CS 2 struct pasemi_mac_txring { @@ -84,10 +83,7 @@ struct pasemi_mac { u8 mac_addr[ETH_ALEN]; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS]; struct timer_list rxtimer; - unsigned int lro_max_aggr; struct pasemi_mac_txring *tx; struct pasemi_mac_rxring *rx; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c index 25fae568261f..f046bfc18e7d 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include "pasemi_mac.h" -- cgit v1.2.3 From 63e5f9535fcf238b0779e6bcaf092e3f627e6942 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Mon, 15 Feb 2016 21:25:44 +0000 Subject: RDMA/nes: Replace LRO with GRO GRO is simpler to use than the old inet_lro library, and is compatible with forwarding and bridging configurations. Compile-tested only. Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller --- drivers/infiniband/hw/nes/Kconfig | 1 - drivers/infiniband/hw/nes/nes_hw.c | 44 +------------------------------------ drivers/infiniband/hw/nes/nes_hw.h | 7 ------ drivers/infiniband/hw/nes/nes_nic.c | 7 ------ 4 files changed, 1 insertion(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/nes/Kconfig b/drivers/infiniband/hw/nes/Kconfig index 846dc97cf260..7964eba8e7ed 100644 --- a/drivers/infiniband/hw/nes/Kconfig +++ b/drivers/infiniband/hw/nes/Kconfig @@ -2,7 +2,6 @@ config INFINIBAND_NES tristate "NetEffect RNIC Driver" depends on PCI && INET && INFINIBAND select LIBCRC32C - select INET_LRO ---help--- This is the RDMA Network Interface Card (RNIC) driver for NetEffect Ethernet Cluster Server Adapters. 
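The nes_hw.c and nes_nic.c hunks that follow apply the same recipe. Condensed to the receive hand-off alone (a paraphrase of the hunks below, not a literal excerpt), the change is:

	/* before: driver-managed inet_lro aggregation, flushed per batch */
	if (nes_use_lro)
		lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
	else
		netif_receive_skb(rx_skb);
	/* ... at the end of the completion loop ... */
	lro_flush_all(&nesvnic->lro_mgr);

	/* after: unconditional hand-off to GRO; no lro_mgr state, no flush */
	napi_gro_receive(&nesvnic->napi, rx_skb);

Beyond that, the patch only removes the now-unused pieces: the NETIF_F_LRO feature flag and the LRO-specific ethtool counters that reported on the deleted lro_mgr.
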
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 4713dd7ed764..a1c6481d8038 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -35,18 +35,11 @@ #include #include #include -#include -#include #include -#include #include #include "nes.h" -static unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR; -module_param(nes_lro_max_aggr, uint, 0444); -MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation"); - static int wide_ppm_offset; module_param(wide_ppm_offset, int, 0644); MODULE_PARM_DESC(wide_ppm_offset, "Increase CX4 interface clock ppm offset, 0=100ppm (default), 1=300ppm"); @@ -1642,25 +1635,6 @@ static void nes_rq_wqes_timeout(unsigned long parm) } -static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr, - void **tcph, u64 *hdr_flags, void *priv) -{ - unsigned int ip_len; - struct iphdr *iph; - skb_reset_network_header(skb); - iph = ip_hdr(skb); - if (iph->protocol != IPPROTO_TCP) - return -1; - ip_len = ip_hdrlen(skb); - skb_set_transport_header(skb, ip_len); - *tcph = tcp_hdr(skb); - - *hdr_flags = LRO_IPV4 | LRO_TCP; - *iphdr = iph; - return 0; -} - - /** * nes_init_nic_qp */ @@ -1895,14 +1869,6 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev) return -ENOMEM; } - nesvnic->lro_mgr.max_aggr = nes_lro_max_aggr; - nesvnic->lro_mgr.max_desc = NES_MAX_LRO_DESCRIPTORS; - nesvnic->lro_mgr.lro_arr = nesvnic->lro_desc; - nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr; - nesvnic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID; - nesvnic->lro_mgr.dev = netdev; - nesvnic->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY; - nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY; return 0; } @@ -2809,13 +2775,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) u16 pkt_type; u16 rqes_processed = 0; u8 sq_cqes = 0; - u8 nes_use_lro = 0; head = cq->cq_head; cq_size = cq->cq_size; cq->cqes_pending = 1; - if (nesvnic->netdev->features & NETIF_F_LRO) - nes_use_lro = 1; do { if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) & NES_NIC_CQE_VALID) { @@ -2950,10 +2913,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq) __vlan_hwaccel_put_tag(rx_skb, htons(ETH_P_8021Q), vlan_tag); } - if (nes_use_lro) - lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL); - else - netif_receive_skb(rx_skb); + napi_gro_receive(&nesvnic->napi, rx_skb); skip_rx_indicate0: ; @@ -2984,8 +2944,6 @@ skip_rx_indicate0: } while (1); - if (nes_use_lro) - lro_flush_all(&nesvnic->lro_mgr); if (sq_cqes) { barrier(); /* restart the queue if it had been stopped */ diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h index c9080208aad2..1b66ef1e9937 100644 --- a/drivers/infiniband/hw/nes/nes_hw.h +++ b/drivers/infiniband/hw/nes/nes_hw.h @@ -33,8 +33,6 @@ #ifndef __NES_HW_H #define __NES_HW_H -#include - #define NES_PHY_TYPE_CX4 1 #define NES_PHY_TYPE_1G 2 #define NES_PHY_TYPE_ARGUS 4 @@ -1049,8 +1047,6 @@ struct nes_hw_tune_timer { #define NES_TIMER_ENABLE_LIMIT 4 #define NES_MAX_LINK_INTERRUPTS 128 #define NES_MAX_LINK_CHECK 200 -#define NES_MAX_LRO_DESCRIPTORS 32 -#define NES_LRO_MAX_AGGR 64 struct nes_adapter { u64 fw_ver; @@ -1263,9 +1259,6 @@ struct nes_vnic { u8 next_qp_nic_index; u8 of_device_registered; u8 rdma_enabled; - u32 lro_max_aggr; - struct net_lro_mgr lro_mgr; - struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS]; struct timer_list event_timer; enum ib_event_type delayed_event; enum 
ib_event_type last_dispatched_event; diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 6a0bdfa0ce2e..3ea9e055fdd3 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1085,9 +1085,6 @@ static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = { "Free 4Kpbls", "Free 256pbls", "Timer Inits", - "LRO aggregated", - "LRO flushed", - "LRO no_desc", "PAU CreateQPs", "PAU DestroyQPs", }; @@ -1302,9 +1299,6 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev, target_stat_values[++index] = nesadapter->free_4kpbl; target_stat_values[++index] = nesadapter->free_256pbl; target_stat_values[++index] = int_mod_timer_init; - target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated; - target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed; - target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc; target_stat_values[++index] = atomic_read(&pau_qps_created); target_stat_values[++index] = atomic_read(&pau_qps_destroyed); } @@ -1709,7 +1703,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev, netdev->hw_features |= NETIF_F_TSO; netdev->features = netdev->hw_features | NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX; - netdev->hw_features |= NETIF_F_LRO; nes_debug(NES_DBG_INIT, "nesvnic = %p, reported features = 0x%lX, QPid = %d," " nic_index = %d, logical_port = %d, mac_index = %d.\n", -- cgit v1.2.3 From 2818ccd95684d39cc3bdad579e02ae56c0d6de88 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Jan 2016 16:51:38 -0800 Subject: i40e: move sync_vsi_filters up in service_task The sync_vsi_filters function is moved up in the service_task because it may need to request a reset, and we don't want to wait another round of service task time. NOTE: Filters will be replayed by sync_vsi_filters including broadcast and promiscuous settings. Also, added some error handling in this space in case any of these fail the driver will retry correctly. Also update copyright year. Change-ID: I23f3d552100baecea69466339f738f27614efd47 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 04417e61b523..e974db32975b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -2168,6 +2168,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } } out: + /* if something went wrong then set the changed flag so we try again */ + if (retval) + vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; + clear_bit(__I40E_CONFIG_BUSY, &vsi->state); return retval; } @@ -7113,6 +7117,7 @@ static void i40e_service_task(struct work_struct *work) } i40e_detect_recover_hung(pf); + i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); i40e_vc_process_vflr_event(pf); -- cgit v1.2.3 From f1bbad339cdd74cea945e33d758200d42310be17 Mon Sep 17 00:00:00 2001 From: Neerav Parikh Date: Wed, 13 Jan 2016 16:51:39 -0800 Subject: i40e: Make the DCB firmware checks for X710/XL710 only Make the DCB firmware version related checks specific to X710 and XL710 only. These checks are not required for X722 family of devices. Introduced an inline routine to help determine if the MAC type is X710/XL710 or not. Moved the firmware version related checks in i40e_sw_init() and defined flags for different cases Fix the version check to allow using "Set LLDP MIB" AQ for beyond FVL4 FW releases. Change-ID: Ib78288343de983aa0354fc28aa36e99b073662c0 Signed-off-by: Neerav Parikh Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 16 ++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_main.c | 27 ++++++++++++++++++++------- 2 files changed, 36 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 05af33e1d810..7bfd062fbadc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -138,6 +138,19 @@ /* default to trying for four seconds */ #define I40E_TRY_LINK_TIMEOUT (4 * HZ) +/** + * i40e_is_mac_710 - Return true if MAC is X710/XL710 + * @hw: ptr to the hardware info + **/ +static inline bool i40e_is_mac_710(struct i40e_hw *hw) +{ + if ((hw->mac.type == I40E_MAC_X710) || + (hw->mac.type == I40E_MAC_XL710)) + return true; + + return false; +} + /* driver state flags */ enum i40e_state_t { __I40E_TESTING, @@ -342,6 +355,9 @@ struct i40e_pf { #define I40E_FLAG_NO_PCI_LINK_CHECK BIT_ULL(42) #define I40E_FLAG_100M_SGMII_CAPABLE BIT_ULL(43) #define I40E_FLAG_RESTART_AUTONEG BIT_ULL(44) +#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) +#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) +#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) #define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e974db32975b..81b789587037 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5012,8 +5012,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf) int err = 0; /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4)) + if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT) goto out; /* Get the initial DCB configuration */ @@ -8425,11 +8424,25 @@ static int i40e_sw_init(struct i40e_pf *pf) pf->hw.func_caps.fd_filters_best_effort; } - if (((pf->hw.mac.type == I40E_MAC_X710) || - (pf->hw.mac.type == I40E_MAC_XL710)) && + if (i40e_is_mac_710(&pf->hw) && (((pf->hw.aq.fw_maj_ver == 4) && 
(pf->hw.aq.fw_min_ver < 33)) || - (pf->hw.aq.fw_maj_ver < 4))) + (pf->hw.aq.fw_maj_ver < 4))) { pf->flags |= I40E_FLAG_RESTART_AUTONEG; + /* No DCB support for FW < v4.33 */ + pf->flags |= I40E_FLAG_NO_DCB_SUPPORT; + } + + /* Disable FW LLDP if FW < v4.3 */ + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || + (pf->hw.aq.fw_maj_ver < 4))) + pf->flags |= I40E_FLAG_STOP_FW_LLDP; + + /* Use the FW Set LLDP MIB API if FW > v4.40 */ + if (i40e_is_mac_710(&pf->hw) && + (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) || + (pf->hw.aq.fw_maj_ver >= 5))) + pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB; if (pf->hw.func_caps.vmdq) { pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; @@ -8458,6 +8471,7 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | I40E_FLAG_100M_SGMII_CAPABLE | + I40E_FLAG_USE_SET_LLDP_MIB | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; } else if ((pf->hw.aq.api_maj_ver > 1) || ((pf->hw.aq.api_maj_ver == 1) && @@ -10825,8 +10839,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * Ignore error return codes because if it was already disabled via * hardware settings this will fail */ - if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || - (pf->hw.aq.fw_maj_ver < 4)) { + if (pf->flags & I40E_FLAG_STOP_FW_LLDP) { dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n"); i40e_aq_stop_lldp(hw, true, NULL); } -- cgit v1.2.3 From 67be6eb2473ac86e71f68c9470f8e00f39df7258 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 13 Jan 2016 16:51:40 -0800 Subject: i40e: set shared bit for multicast filters Add the use of the new Shared MAC filter bit for multicast and broadcast filters in order to make better use of the filters available from the device. The FW folks have assured me that setting this bit on older FW will have no affect, so we don't need a version check. Also fixed a stray indent problem nearby. Also update copyright year. Change-ID: I4c5826a32594382a7937a592a24d228588cee7aa Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 976b03fe5b19..edfea38abc73 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -2435,6 +2435,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, (struct i40e_aqc_macvlan *)&desc.params.raw; i40e_status status; u16 buf_size; + int i; if (count == 0 || !mv_list || !hw) return I40E_ERR_PARAM; @@ -2448,12 +2449,17 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid, cmd->seid[1] = 0; cmd->seid[2] = 0; + for (i = 0; i < count; i++) + if (is_multicast_ether_addr(mv_list[i].mac_addr)) + mv_list[i].flags |= + cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC); + desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD)); if (buf_size > I40E_AQ_LARGE_BUF) desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); status = i40e_asq_send_command(hw, &desc, mv_list, buf_size, - cmd_details); + cmd_details); return status; } -- cgit v1.2.3 From 8a187f448ede55147dfa2fdcacfacd795747972d Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 13 Jan 2016 16:51:41 -0800 Subject: i40e: add VEB stat control and remove L2 cloud filter With the latest firmware, statistics gathering can now be enabled and disabled in the HW switch, so we need to add a parameter to allow the driver to set it as desired. At the same time, the L2 cloud filtering parameter has been removed as it was never used. Older drivers working with the newer firmware and newer drivers working with older firmware will not run into problems with these bits as the defaults are reasonable and there is no overlap in the bit definitions. Also, newer drivers will be forced to update because of the change in function call parameters, a reminder that the functionality exists. Also update copyright year. Change-ID: I9acb9160b892ca3146f2f11a88fdcd86be3cadcc Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 11 ++++++----- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 6 +++--- 3 files changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index edfea38abc73..354e36cf2fff 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2308,8 +2308,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) * @downlink_seid: the VSI SEID * @enabled_tc: bitmap of TCs to be enabled * @default_port: true for default port VSI, false for control port - * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support * @veb_seid: pointer to where to put the resulting VEB SEID + * @enable_stats: true to turn on VEB stats * @cmd_details: pointer to command details structure or NULL * * This asks the FW to add a VEB between the uplink and downlink @@ -2317,8 +2317,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw) **/ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, bool enable_l2_filtering, - u16 *veb_seid, + bool default_port, u16 *veb_seid, + bool enable_stats, struct i40e_asq_cmd_details *cmd_details) { struct i40e_aq_desc desc; @@ -2345,8 +2345,9 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, else veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA; - if (enable_l2_filtering) - veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER; + /* reverse 
logic here: set the bitflag to disable the stats */ + if (!enable_stats) + veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS; cmd->veb_flags = cpu_to_le16(veb_flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 81b789587037..95fb34254ce6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10075,7 +10075,7 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, - is_cloud, &veb->seid, NULL); + &veb->seid, is_cloud, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index 45af29b8f023..e8deabde82b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -138,8 +138,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc, - bool default_port, bool enable_l2_filtering, - u16 *pveb_seid, + bool default_port, u16 *pveb_seid, + bool enable_stats, struct i40e_asq_cmd_details *cmd_details); i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw, u16 veb_seid, u16 *switch_id, bool *floating, -- cgit v1.2.3 From 66fc360a0e9dd17e2c6ea02c15b02590b583abd1 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 13 Jan 2016 16:51:42 -0800 Subject: i40e: use new add_veb calling with VEB stats control The new parameters for add_veb allow us to enable and disable VEB stats, so let's use them. Update copyright year. Change-ID: Ie6e68c68e2d1d459e42168eda661051b56bf0a65 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 11 ++++++++--- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 ++-- 2 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 89ad2f749918..230fa402d2da 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -2785,10 +2785,15 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags) pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; } - if (flags & I40E_PRIV_FLAGS_VEB_STATS) + if ((flags & I40E_PRIV_FLAGS_VEB_STATS) && + !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { pf->flags |= I40E_FLAG_VEB_STATS_ENABLED; - else + reset_required = true; + } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) && + (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) { pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED; + reset_required = true; + } if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) && (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 95fb34254ce6..0acec51cea1b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10069,13 +10069,13 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) { struct i40e_pf *pf = veb->pf; bool is_default = veb->pf->cur_promisc; - bool is_cloud = false; + bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED); int ret; /* get a VEB from the hardware */ ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid, veb->enabled_tc, is_default, - &veb->seid, is_cloud, NULL); + &veb->seid, enable_stats, NULL); if (ret) { dev_info(&pf->pdev->dev, "couldn't add VEB, err %s aq_err %s\n", -- cgit v1.2.3 From ecc6a239e8dd233951bd3337f34d9ba4c46720c9 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 13 Jan 2016 16:51:43 -0800 Subject: i40e: Refactor force_wb and WB_ON_ITR functionality code Now that the Force-WriteBack functionality in X710/XL710 devices has been moved out of the clean routine and into the service task, we need to make sure WriteBack-On-ITR is separated out since it is still called from clean. In the X722 devices, Force-WriteBack implies WriteBack-On-ITR but without the interrupt, which put the driver into a missed interrupt scenario and a potential tx-timeout report. With this patch, we break the two functions out, and call the appropriate ones at the right place. This will avoid creating missed interrupt like scenarios for X722 devices. Also update copyright year in file headers. Change-ID: Iacbde39f95f332f82be8736864675052c3583a40 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 57 ++++++++++++++---------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 63 +++++++++++++++------------ drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 3 +- 3 files changed, 72 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6d1dd60c5c91..7dfd45e90e64 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -774,37 +774,48 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } /** - * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback + * @q_vector: the vector on which to enable writeback * **/ -void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; + u32 val; - if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { - u32 val; + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; - if (q_vector->arm_wb_state) - return; + if (q_vector->arm_wb_state) + return; - if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { - val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | - I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */ - wr32(&vsi->back->hw, - I40E_PFINT_DYN_CTLN(q_vector->v_idx + - vsi->base_vector - 1), - val); - } else { - val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | - I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), + val); + } else { + val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK | + I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */ - wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); - } - q_vector->arm_wb_state = true; - } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { + wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); + } + q_vector->arm_wb_state = true; +} + +/** + * i40e_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | @@ -1946,7 +1957,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40e_force_wb(vsi, q_vector); + i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 4205aef2ffca..1c62cf578e1b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -292,40 +292,49 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } /** - * i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors + * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled * @vsi: the VSI we care about - * @q_vector: the vector on which to force writeback + * @q_vector: the vector on which to enable writeback * **/ -static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi, + struct i40e_q_vector *q_vector) { u16 flags = q_vector->tx.ring[0].flags; + u32 val; - if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { - u32 val; + if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR)) + return; - if (q_vector->arm_wb_state) - return; + if (q_vector->arm_wb_state) + return; - val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ + val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), - val); - q_vector->arm_wb_state = true; - } else { - u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ - I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | - I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK; - /* allow 00 to be written to the index */ - - wr32(&vsi->back->hw, - I40E_VFINT_DYN_CTLN1(q_vector->v_idx + - vsi->base_vector - 1), val); - } + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + + vsi->base_vector - 1), val); + q_vector->arm_wb_state = true; +} + +/** + * i40evf_force_wb - Issue SW Interrupt so HW does a wb + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | + I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ + I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | + I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK + /* allow 00 to be written to the index */; + + wr32(&vsi->back->hw, + I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1), + val); } /** @@ -1384,7 +1393,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) tx_only: if (arm_wb) { q_vector->tx.ring[0].tx_stats.tx_force_wb++; - i40evf_force_wb(vsi, q_vector); + i40e_enable_wb_on_itr(vsi, q_vector); } return budget; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index e29bb3e86cfd..da701c56eaa2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -324,6 +324,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring); void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); +void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40evf_get_tx_pending(struct i40e_ring *ring); /** -- cgit v1.2.3 From eaab59e9d2908df1c31d94f2da1aa3ebeebd8381 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Wed, 13 Jan 2016 16:51:44 -0800 Subject: i40evf: Change vf driver string to reflect all products i40evf supports Change the driver string to 40-10 Gigabit instead of XL710/X710 for X722 and all future products. Also update copyright year in file header. Change-ID: I57fae656b36dc4eb682b2b7a054f8f48f3589149 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 045cc7fb4623..faa1bca88699 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -32,7 +32,7 @@ static int i40evf_close(struct net_device *netdev); char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = - "Intel(R) XL710/X710 Virtual Function Network Driver"; + "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; #define DRV_KERN "-k" -- cgit v1.2.3 From 40d72a50986271e5c30f111e08ebd6d846708bbc Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Jan 2016 16:51:45 -0800 Subject: i40e/i40evf: don't lose interrupts While re-enabling interrupts the driver would clear all pending causes. This meant that if an interrupt was generated while the driver was cleaning or polling with interrupts disabled, then that interrupt was lost. This could cause a queue to become dead, especially for receive. Refactored the enable_icr0 function in order to allow it to be decided by the caller whether the CLEARPBA (clear pending events) bit will be set while re-enabling the interrupt. Also update copyright year in file headers. 
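The core of the change is that clearing the pending-event (PBA) bits becomes a caller decision instead of an unconditional side effect of re-enabling the interrupt. Below is a minimal user-space sketch of that pattern; the MODEL_* masks and build_dyn_ctl() are illustrative stand-ins, not the driver's real register definitions.

/* Model: whether pending events are wiped is now up to the caller. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_INTENA_MASK    (1u << 0)  /* re-arm the interrupt        */
#define MODEL_CLEARPBA_MASK  (1u << 1)  /* wipe pending event bits     */

static uint32_t build_dyn_ctl(bool clearpba)
{
	/* Only OR in CLEARPBA when asked, so events that arrived while the
	 * driver was polling with interrupts off are not silently dropped. */
	return MODEL_INTENA_MASK | (clearpba ? MODEL_CLEARPBA_MASK : 0);
}

int main(void)
{
	printf("enable after a full clean : 0x%x\n", build_dyn_ctl(true));
	printf("enable from the ISR path  : 0x%x\n", build_dyn_ctl(false));
	return 0;
}
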
Change-ID: Ic1db100a05e13c98919057696db147a258ca365a Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 7 +++++-- drivers/net/ethernet/intel/i40e/i40e_main.c | 11 ++++++----- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 6 ++++-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 4 ++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 4 +++- 5 files changed, 20 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 7bfd062fbadc..5ea431d04e8c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -767,6 +767,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) struct i40e_hw *hw = &pf->hw; u32 val; + /* definitely clear the PBA here, as this function is meant to + * clean out all previous interrupts AND enable the interrupt + */ val = I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); @@ -775,7 +778,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector) } void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf); -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf); +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba); #ifdef I40E_FCOE struct rtnl_link_stats64 *i40e_get_netdev_stats_struct( struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0acec51cea1b..8bc848f66371 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3257,14 +3257,15 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) /** * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * @pf: board private structure + * @clearpba: true when all pending interrupt events should be cleared **/ -void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) +void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba) { struct i40e_hw *hw = &pf->hw; u32 val; val = I40E_PFINT_DYN_CTL0_INTENA_MASK | - I40E_PFINT_DYN_CTL0_CLEARPBA_MASK | + (clearpba ? 
I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) | (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT); wr32(hw, I40E_PFINT_DYN_CTL0, val); @@ -3396,7 +3397,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi) for (i = 0; i < vsi->num_q_vectors; i++) i40e_irq_dynamic_enable(vsi, i); } else { - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, true); } i40e_flush(&pf->hw); @@ -3542,7 +3543,7 @@ enable_intr: wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_service_event_schedule(pf); - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, false); } return ret; @@ -7858,7 +7859,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf) i40e_flush(hw); - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, true); return err; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 7dfd45e90e64..353e5a0d0f50 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1810,7 +1810,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_PFINT_DYN_CTLN_INTENA_MASK | - I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) | (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT); @@ -1983,7 +1985,7 @@ tx_only: qval = rd32(hw, I40E_QINT_TQCTL(0)) | I40E_QINT_TQCTL_CAUSE_ENA_MASK; wr32(hw, I40E_QINT_TQCTL(0), qval); - i40e_irq_dynamic_enable_icr0(vsi->back); + i40e_irq_dynamic_enable_icr0(vsi->back, false); } return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 1635c7a0e505..3e0d87e3ff3a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -980,7 +980,7 @@ err_alloc: i40e_free_vfs(pf); err_iov: /* Re-enable interrupt 0. */ - i40e_irq_dynamic_enable_icr0(pf); + i40e_irq_dynamic_enable_icr0(pf, false); return ret; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 1c62cf578e1b..621042481a78 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1248,7 +1248,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr) u32 val; val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | - I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | + /* Don't clear PBA because that can cause lost interrupts that + * came in while we were cleaning/polling + */ (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) | (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT); -- cgit v1.2.3 From c2e245ab1e9a61e66217aafea66c7dc6481f12f0 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Jan 2016 16:51:46 -0800 Subject: i40e/i40evf: try again after failure This is the "Don't Give Up" patch. Previously the driver could fail an allocation, and then possibly stall a queue forever, by never coming back to continue receiving or allocating buffers. 
With this patch, the driver will keep polling trying to allocate receive buffers until it succeeds. This should keep all receive queues running even in the face of memory pressure. Also update copyright year in file header. Change-ID: I2b103d1ce95b9831288a7222c3343ffa1988b81b Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 51 ++++++++++++++++++++++----- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 6 ++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 51 ++++++++++++++++++++++----- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 4 +-- 4 files changed, 89 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 353e5a0d0f50..8049206206f6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1195,8 +1195,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1204,7 +1206,7 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -1251,17 +1253,29 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -1270,7 +1284,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -1297,6 +1311,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) if (dma_mapping_error(rx_ring->dev, bi->dma)) { rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; + dev_kfree_skb(bi->skb); + bi->skb = NULL; goto no_buffers; } } @@ -1308,9 +1324,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** @@ -1494,7 +1520,7 @@ static inline void i40e_rx_hash(struct i40e_ring 
*ring, * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -1504,6 +1530,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + bool failure = false; u8 rx_ptype; u64 qword; @@ -1516,7 +1543,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count); + failure = failure || + i40e_alloc_rx_buffers_ps(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1546,6 +1575,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->rx_hdr_len); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; + failure = true; break; } @@ -1675,7 +1705,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } /** @@ -1693,6 +1723,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; u16 rx_packet_len; + bool failure = false; u8 rx_ptype; u64 qword; u16 i; @@ -1703,7 +1734,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + failure = failure || + i40e_alloc_rx_buffers_1buf(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1802,7 +1835,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3f081e25e097..5c73f3d294b2 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -316,8 +316,8 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); void i40e_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40e_clean_tx_ring(struct i40e_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 621042481a78..616daae95e58 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -667,8 +667,10 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val) * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -676,7 +678,7 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -723,17 +725,29 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return true; } /** * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer * @rx_ring: ring to place buffers on * @cleaned_count: number of buffers to replace + * + * Returns true if any errors on allocation **/ -void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) +bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) { u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; @@ -742,7 +756,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) - return; + return false; while (cleaned_count--) { rx_desc = I40E_RX_DESC(rx_ring, i); @@ -769,6 +783,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) if (dma_mapping_error(rx_ring->dev, bi->dma)) { rx_ring->rx_stats.alloc_buff_failed++; bi->dma = 0; + dev_kfree_skb(bi->skb); + bi->skb = NULL; goto no_buffers; } } @@ -780,9 +796,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) i = 0; } + if (rx_ring->next_to_use != i) + i40e_release_rx_desc(rx_ring, i); + + return false; + no_buffers: if (rx_ring->next_to_use != i) i40e_release_rx_desc(rx_ring, i); + + /* make sure to come back via polling to try again after + * allocation failure + */ + return 
true; } /** @@ -965,7 +991,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, * * Returns true if there's any budget left (e.g. the clean is finished) **/ -static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) +static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; @@ -975,6 +1001,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; + bool failure = false; u8 rx_ptype; u64 qword; @@ -984,7 +1011,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count); + failure = failure || + i40evf_alloc_rx_buffers_ps(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1009,6 +1038,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->rx_hdr_len); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; + failure = true; break; } @@ -1131,7 +1161,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? budget : total_rx_packets; } /** @@ -1149,6 +1179,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) union i40e_rx_desc *rx_desc; u32 rx_error, rx_status; u16 rx_packet_len; + bool failure = false; u8 rx_ptype; u64 qword; u16 i; @@ -1159,7 +1190,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) u16 vlan_tag; /* return some buffers to hardware, one at a time is too slow */ if (cleaned_count >= I40E_RX_BUFFER_WRITE) { - i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count); + failure = failure || + i40evf_alloc_rx_buffers_1buf(rx_ring, + cleaned_count); cleaned_count = 0; } @@ -1240,7 +1273,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget) rx_ring->q_vector->rx.total_packets += total_rx_packets; rx_ring->q_vector->rx.total_bytes += total_rx_bytes; - return total_rx_packets; + return failure ? 
budget : total_rx_packets; } static u32 i40e_buildreg_itr(const int type, const u16 itr) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index da701c56eaa2..d8071ba43c42 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -313,8 +313,8 @@ struct i40e_ring_container { #define i40e_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); -void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); +bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count); +bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count); void i40evf_alloc_rx_headers(struct i40e_ring *rxr); netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); void i40evf_clean_tx_ring(struct i40e_ring *tx_ring); -- cgit v1.2.3 From 13cb3e9d71d5823a3169b10b428ce717434c657e Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Jan 2016 16:51:47 -0800 Subject: i40e: dump descriptor indexes in hex The debugging helpers for showing descriptor rings were dumping the indexes in decimal and the offsets in hex. Put everything in hex and at least be consistent. Also update copyright year in file header. Change-ID: Ia35a21411a2ddb713772dffb4e8718889fcfc895 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 3948587a047c..fcae3c8923ce 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -825,20 +825,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (!is_rx_ring) { txd = I40E_TX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx\n", i, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, i); dev_info(&pf->pdev->dev, - " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", i, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); @@ -853,20 +853,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (!is_rx_ring) { txd = I40E_TX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, txd->buffer_addr, txd->cmd_type_offset_bsz); } else if (sizeof(union i40e_rx_desc) == sizeof(union i40e_16byte_rx_desc)) { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n", + "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr); } else { rxd = I40E_RX_DESC(ring, desc_n); dev_info(&pf->pdev->dev, - "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", + "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n", vsi_seid, ring_id, desc_n, rxd->read.pkt_addr, rxd->read.hdr_addr, rxd->read.rsvd1, rxd->read.rsvd2); -- cgit v1.2.3 From dd1a5df872d7889b37b5ce3058a9f2e20d4d8b56 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Jan 2016 16:51:48 -0800 Subject: i40e/i40evf: use __GFP_NOWARN The i40e and i40evf drivers now cleanly handle allocation failures and can avoid kernel log spew from the memory allocator when allocations fail, so set __GFP_NOWARN on Rx buffer alloc. 
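Suppressing the allocator's warning is safe only because the refill paths now react to failure themselves: count it, return, and let the next poll retry. A rough user-space sketch of that fail-quietly-and-retry shape follows; refill_buffer() and rx_stats are made-up names standing in for the driver's skb refill helpers, not kernel APIs.

#include <stdbool.h>
#include <stdlib.h>

struct rx_stats {
	unsigned long alloc_buff_failed;
};

/* On failure, bump a counter and tell the caller to come back later --
 * no log spam for every failed attempt. */
static bool refill_buffer(void **slot, size_t len, struct rx_stats *stats)
{
	if (!*slot) {
		*slot = malloc(len);      /* stands in for the skb allocation */
		if (!*slot) {
			stats->alloc_buff_failed++;
			return true;      /* caller should retry later */
		}
	}
	return false;                     /* buffer is in place */
}

int main(void)
{
	struct rx_stats stats = { 0 };
	void *buf = NULL;

	while (refill_buffer(&buf, 2048, &stats))
		;  /* the real driver returns to NAPI and retries on the next poll */
	free(buf);
	return 0;
}
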
Change-ID: Ic9e1b83c495e2a3ef6b069ba7fb6e52ce134cd23 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12 ++++++++---- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 12 ++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 8049206206f6..baaf0939a913 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1292,8 +1292,10 @@ bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; @@ -1571,8 +1573,10 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; failure = true; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 616daae95e58..1dbdcf8e0710 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -764,8 +764,10 @@ bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count) skb = bi->skb; if (!skb) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_buf_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_buf_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; goto no_buffers; @@ -1034,8 +1036,10 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { - skb = netdev_alloc_skb_ip_align(rx_ring->netdev, - rx_ring->rx_hdr_len); + skb = __netdev_alloc_skb_ip_align(rx_ring->netdev, + rx_ring->rx_hdr_len, + GFP_ATOMIC | + __GFP_NOWARN); if (!skb) { rx_ring->rx_stats.alloc_buff_failed++; failure = true; -- cgit v1.2.3 From f16704e5e8aed1dfed4084c56dde17006c2e81f1 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 13 Jan 2016 16:51:49 -0800 Subject: i40e/i40evf: use pages correctly in Rx Refactor the packet split Rx code to properly use half-pages for receives. The previous code was doing way more mapping and unmapping than it needed to, and wasn't properly using half-pages. Increment the page use count each time we give a half-page to an skb, knowing that the stack will probably process and release the page before we need it again. Only free and reallocate pages if the count shows that both half-pages are in use. Add counters to track reallocations and page reuse. 
Change-ID: I534b299196036b64be82b4861a0a4036310a8f22 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 5 ++ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 118 ++++++++++++++++--------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2 + drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 118 ++++++++++++++++--------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 2 + 5 files changed, 159 insertions(+), 86 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index fcae3c8923ce..bdac69185fef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -535,6 +535,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) i, rx_ring->rx_stats.alloc_page_failed, rx_ring->rx_stats.alloc_buff_failed); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n", + i, + rx_ring->rx_stats.realloc_count, + rx_ring->rx_stats.page_reuse_count); dev_info(&pf->pdev->dev, " rx_rings[%i]: size = %i, dma = 0x%08lx\n", i, rx_ring->size, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index baaf0939a913..1abef01e5a2c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1060,7 +1060,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring) if (rx_bi->page_dma) { dma_unmap_page(dev, rx_bi->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE, DMA_FROM_DEVICE); rx_bi->page_dma = 0; } @@ -1203,6 +1203,7 @@ bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + const int current_node = numa_node_id(); /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) @@ -1214,39 +1215,50 @@ bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) if (bi->skb) /* desc is in use */ goto no_buffers; + + /* If we've been moved to a different NUMA node, release the + * page so we can get a new one on the current node. + */ + if (bi->page && page_to_nid(bi->page) != current_node) { + dma_unmap_page(rx_ring->dev, + bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(bi->page); + bi->page = NULL; + bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else if (bi->page) { + rx_ring->rx_stats.page_reuse_count++; + } + if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { rx_ring->rx_stats.alloc_page_failed++; goto no_buffers; } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; bi->page_dma = dma_map_page(rx_ring->dev, bi->page, - bi->page_offset, - PAGE_SIZE / 2, + 0, + PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { rx_ring->rx_stats.alloc_page_failed++; + __free_page(bi->page); + bi->page = NULL; bi->page_dma = 0; + bi->page_offset = 0; goto no_buffers; } + bi->page_offset = 0; } - dma_sync_single_range_for_device(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
*/ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) @@ -1527,7 +1539,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; @@ -1535,6 +1546,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) bool failure = false; u8 rx_ptype; u64 qword; + u32 copysize; if (budget <= 0) return 0; @@ -1565,6 +1577,12 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) * DD bit is set. */ dma_rmb(); + /* sync header buffer for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); if (i40e_rx_is_programming_status(qword)) { i40e_clean_programming_status(rx_ring, rx_desc); I40E_RX_INCREMENT(rx_ring, i); @@ -1606,9 +1624,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - prefetch(rx_bi->page); + /* sync half-page for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->page_dma, + rx_bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + prefetch(page_address(rx_bi->page) + rx_bi->page_offset); rx_bi->skb = NULL; cleaned_count++; + copysize = 0; if (rx_hbo || rx_sph) { int len; @@ -1619,38 +1644,45 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); } else if (skb->len == 0) { int len; + unsigned char *va = page_address(rx_bi->page) + + rx_bi->page_offset; - len = (rx_packet_len > skb_headlen(skb) ? - skb_headlen(skb) : rx_packet_len); - memcpy(__skb_put(skb, len), - rx_bi->page + rx_bi->page_offset, - len); - rx_bi->page_offset += len; + len = min(rx_packet_len, rx_ring->rx_hdr_len); + memcpy(__skb_put(skb, len), va, len); + copysize = len; rx_packet_len -= len; } - /* Get the rest of the data if this was a header split */ if (rx_packet_len) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset, - rx_packet_len); - - skb->len += rx_packet_len; - skb->data_len += rx_packet_len; - skb->truesize += rx_packet_len; - - if ((page_count(rx_bi->page) == 1) && - (page_to_nid(rx_bi->page) == current_node)) - get_page(rx_bi->page); - else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset + copysize, + rx_packet_len, I40E_RXBUFFER_2048); + + get_page(rx_bi->page); + /* switch to the other half-page here; the allocation + * code programs the right addr into HW. If we haven't + * used this half-page, the address won't be changed, + * and HW can just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; + /* If the page count is more than 2, then both halves + * of the page are used and we need to free it. Do it + * here instead of in the alloc code. Otherwise one + * of the half-pages might be released between now and + * then, and we wouldn't know which one to use. 
+ */ + if (page_count(rx_bi->page) > 2) { + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(rx_bi->page); rx_bi->page = NULL; + rx_bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; } I40E_RX_INCREMENT(rx_ring, i); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 5c73f3d294b2..3b8d14701613 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -209,6 +209,8 @@ struct i40e_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; }; enum i40e_ring_state_t { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 1dbdcf8e0710..6f739a7bc271 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -532,7 +532,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) if (rx_bi->page_dma) { dma_unmap_page(dev, rx_bi->page_dma, - PAGE_SIZE / 2, + PAGE_SIZE, DMA_FROM_DEVICE); rx_bi->page_dma = 0; } @@ -675,6 +675,7 @@ bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) u16 i = rx_ring->next_to_use; union i40e_rx_desc *rx_desc; struct i40e_rx_buffer *bi; + const int current_node = numa_node_id(); /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) @@ -686,39 +687,50 @@ bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count) if (bi->skb) /* desc is in use */ goto no_buffers; + + /* If we've been moved to a different NUMA node, release the + * page so we can get a new one on the current node. + */ + if (bi->page && page_to_nid(bi->page) != current_node) { + dma_unmap_page(rx_ring->dev, + bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(bi->page); + bi->page = NULL; + bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } else if (bi->page) { + rx_ring->rx_stats.page_reuse_count++; + } + if (!bi->page) { bi->page = alloc_page(GFP_ATOMIC); if (!bi->page) { rx_ring->rx_stats.alloc_page_failed++; goto no_buffers; } - } - - if (!bi->page_dma) { - /* use a half page if we're re-using */ - bi->page_offset ^= PAGE_SIZE / 2; bi->page_dma = dma_map_page(rx_ring->dev, bi->page, - bi->page_offset, - PAGE_SIZE / 2, + 0, + PAGE_SIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(rx_ring->dev, - bi->page_dma)) { + if (dma_mapping_error(rx_ring->dev, bi->page_dma)) { rx_ring->rx_stats.alloc_page_failed++; + __free_page(bi->page); + bi->page = NULL; bi->page_dma = 0; + bi->page_offset = 0; goto no_buffers; } + bi->page_offset = 0; } - dma_sync_single_range_for_device(rx_ring->dev, - rx_ring->rx_bi[0].dma, - i * rx_ring->rx_hdr_len, - rx_ring->rx_hdr_len, - DMA_FROM_DEVICE); /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. 
*/ - rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); + rx_desc->read.pkt_addr = + cpu_to_le64(bi->page_dma + bi->page_offset); rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); i++; if (i == rx_ring->count) @@ -998,7 +1010,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); - const int current_node = numa_mem_id(); struct i40e_vsi *vsi = rx_ring->vsi; u16 i = rx_ring->next_to_clean; union i40e_rx_desc *rx_desc; @@ -1006,6 +1017,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) bool failure = false; u8 rx_ptype; u64 qword; + u32 copysize; do { struct i40e_rx_buffer *rx_bi; @@ -1033,6 +1045,12 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) * DD bit is set. */ dma_rmb(); + /* sync header buffer for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_ring->rx_bi[0].dma, + i * rx_ring->rx_hdr_len, + rx_ring->rx_hdr_len, + DMA_FROM_DEVICE); rx_bi = &rx_ring->rx_bi[i]; skb = rx_bi->skb; if (likely(!skb)) { @@ -1069,9 +1087,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT; - prefetch(rx_bi->page); + /* sync half-page for reading */ + dma_sync_single_range_for_cpu(rx_ring->dev, + rx_bi->page_dma, + rx_bi->page_offset, + PAGE_SIZE / 2, + DMA_FROM_DEVICE); + prefetch(page_address(rx_bi->page) + rx_bi->page_offset); rx_bi->skb = NULL; cleaned_count++; + copysize = 0; if (rx_hbo || rx_sph) { int len; @@ -1082,38 +1107,45 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len); } else if (skb->len == 0) { int len; + unsigned char *va = page_address(rx_bi->page) + + rx_bi->page_offset; - len = (rx_packet_len > skb_headlen(skb) ? - skb_headlen(skb) : rx_packet_len); - memcpy(__skb_put(skb, len), - rx_bi->page + rx_bi->page_offset, - len); - rx_bi->page_offset += len; + len = min(rx_packet_len, rx_ring->rx_hdr_len); + memcpy(__skb_put(skb, len), va, len); + copysize = len; rx_packet_len -= len; } - /* Get the rest of the data if this was a header split */ if (rx_packet_len) { - skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, - rx_bi->page, - rx_bi->page_offset, - rx_packet_len); - - skb->len += rx_packet_len; - skb->data_len += rx_packet_len; - skb->truesize += rx_packet_len; - - if ((page_count(rx_bi->page) == 1) && - (page_to_nid(rx_bi->page) == current_node)) - get_page(rx_bi->page); - else + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + rx_bi->page, + rx_bi->page_offset + copysize, + rx_packet_len, I40E_RXBUFFER_2048); + + get_page(rx_bi->page); + /* switch to the other half-page here; the allocation + * code programs the right addr into HW. If we haven't + * used this half-page, the address won't be changed, + * and HW can just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; + /* If the page count is more than 2, then both halves + * of the page are used and we need to free it. Do it + * here instead of in the alloc code. Otherwise one + * of the half-pages might be released between now and + * then, and we wouldn't know which one to use. 
+ */ + if (page_count(rx_bi->page) > 2) { + dma_unmap_page(rx_ring->dev, + rx_bi->page_dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + __free_page(rx_bi->page); rx_bi->page = NULL; + rx_bi->page_dma = 0; + rx_ring->rx_stats.realloc_count++; + } - dma_unmap_page(rx_ring->dev, - rx_bi->page_dma, - PAGE_SIZE / 2, - DMA_FROM_DEVICE); - rx_bi->page_dma = 0; } I40E_RX_INCREMENT(rx_ring, i); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index d8071ba43c42..5f03c444c89b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -208,6 +208,8 @@ struct i40e_rx_queue_stats { u64 non_eop_descs; u64 alloc_page_failed; u64 alloc_buff_failed; + u64 page_reuse_count; + u64 realloc_count; }; enum i40e_ring_state_t { -- cgit v1.2.3 From 1a36d7fadd1477761dc8b4fc8910d1defc31fad5 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 13 Jan 2016 16:51:50 -0800 Subject: i40e/i40evf: use logical operators, not bitwise Mr. Spock would certainly raise an eyebrow to see us using bitwise operators, when we should clearly be relying on logic. Fascinating. Change-ID: Ie338010c016f93e9faa2002c07c90b15134b7477 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +++-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1abef01e5a2c..0ffa9a89986c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1996,7 +1996,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. */ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + clean_complete = clean_complete && + i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -2020,7 +2021,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ - clean_complete &= (budget_per_ring != cleaned); + clean_complete = clean_complete && (budget_per_ring > cleaned); } /* If work not completed, return budget and polling will return */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 6f739a7bc271..76bad75b0f67 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1432,7 +1432,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) * budget and be more aggressive about cleaning up the Tx descriptors. 
*/ i40e_for_each_ring(ring, q_vector->tx) { - clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + clean_complete = clean_complete && + i40e_clean_tx_irq(ring, vsi->work_limit); arm_wb = arm_wb || ring->arm_wb; ring->arm_wb = false; } @@ -1456,7 +1457,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget) work_done += cleaned; /* if we didn't clean as many as budgeted, we must be done */ - clean_complete &= (budget_per_ring != cleaned); + clean_complete = clean_complete && (budget_per_ring > cleaned); } /* If work not completed, return budget and polling will return */ -- cgit v1.2.3 From 4668607aa30b3879312823a0ddbcd15077644f4e Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 13 Jan 2016 16:51:51 -0800 Subject: i40e: properly show packet split status in debugfs Get rid of the unused hsplit field in the ring struct and use the existing macro to detect packet split enablement. This allows debugfs dumps of the VSI to properly show which Rx routine is in use. Change-ID: Ic4e9589e6a788ab196ed0850703f704e30c03781 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 6 +++--- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index bdac69185fef..34da53bfb21b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -521,7 +521,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) rx_ring->dtype); dev_info(&pf->pdev->dev, " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, rx_ring->hsplit, + i, ring_is_ps_enabled(rx_ring), rx_ring->next_to_use, rx_ring->next_to_clean, rx_ring->ring_active); @@ -572,8 +572,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " tx_rings[%i]: dtype = %d\n", i, tx_ring->dtype); dev_info(&pf->pdev->dev, - " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n", - i, tx_ring->hsplit, + " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n", + i, tx_ring->next_to_use, tx_ring->next_to_clean, tx_ring->ring_active); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3b8d14701613..ae22c4e9162f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -256,7 +256,6 @@ struct i40e_ring { #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 - u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 5f03c444c89b..5467fcdf7670 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -255,7 +255,6 @@ struct i40e_ring { #define I40E_RX_DTYPE_NO_SPLIT 0 #define I40E_RX_DTYPE_HEADER_SPLIT 1 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2 - u8 hsplit; #define I40E_RX_SPLIT_L2 0x1 #define I40E_RX_SPLIT_IP 0x2 #define I40E_RX_SPLIT_TCP_UDP 0x4 -- cgit v1.2.3 From c24215c04fe7bf9f5678abe6c496d774a6b8663a Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 13 Jan 2016 16:51:52 -0800 Subject: i40e/i40evf: 
Bump version Bump version to i40e-1.4.13 and i40evf-1.4.9 Change-ID: I9db37f9d4899141c3e5455dfb456d45465b8c035 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8bc848f66371..8d41c6c26850 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 12 +#define DRV_VERSION_BUILD 13 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index faa1bca88699..1d81d57c8266 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 8 +#define DRV_VERSION_BUILD 9 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From fa477f4cb3de7bdd3899029803ebfcf269ba8c85 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 17 Feb 2016 14:34:53 -0800 Subject: ixgbe: use u32 instead of __u32 in model header I incorrectly used __u32 types where we should be using u32 types when I added the ixgbe_model.h file. Fixes: 9d35cf062e05 ("net: ixgbe: add minimal parser details for ixgbe") Suggested-by: Jamal Hadi Salim Signed-off-by: John Fastabend Acked-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 43ebec4362f5..62ea2e712760 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -35,13 +35,13 @@ struct ixgbe_mat_field { unsigned int mask; int (*val)(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, - __u32 val, __u32 m); + u32 val, u32 m); unsigned int type; }; static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, - __u32 val, __u32 m) + u32 val, u32 m) { input->filter.formatted.src_ip[0] = val; mask->formatted.src_ip[0] = m; @@ -50,7 +50,7 @@ static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input, static inline int ixgbe_mat_prgm_dip(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, - __u32 val, __u32 m) + u32 val, u32 m) { input->filter.formatted.dst_ip[0] = val; mask->formatted.dst_ip[0] = m; @@ -67,7 +67,7 @@ static struct ixgbe_mat_field ixgbe_ipv4_fields[] = { static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, - __u32 val, __u32 m) + u32 val, u32 m) { input->filter.formatted.src_port = val & 0xffff; mask->formatted.src_port = m & 0xffff; @@ -76,7 +76,7 @@ static inline int ixgbe_mat_prgm_sport(struct ixgbe_fdir_filter *input, static inline int ixgbe_mat_prgm_dport(struct ixgbe_fdir_filter *input, union ixgbe_atr_input *mask, - __u32 val, __u32 m) + u32 val, u32 m) { input->filter.formatted.dst_port = val & 0xffff; mask->formatted.dst_port = m & 0xffff; @@ -94,12 +94,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = { struct ixgbe_nexthdr { /* offset, shift, and mask of position to next header */ unsigned int o; - __u32 s; - __u32 m; + u32 s; + u32 m; /* match criteria to make this jump*/ unsigned int off; - __u32 val; - __u32 mask; + u32 val; + u32 mask; /* location of jump to make */ struct ixgbe_mat_field *jump; }; -- cgit v1.2.3 From a92265ce1cea3832a47103ae16afa328a396e9af Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Wed, 17 Feb 2016 14:35:23 -0800 Subject: ixgbe: fix dates on header of ixgbe_model.h Fixes: 9d35cf062e05 ("net: ixgbe: add minimal parser details for ixgbe") Reported-by: Mark Rustad Signed-off-by: John Fastabend Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h index 62ea2e712760..ce48872d4782 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel 10 Gigabit PCI Express Linux drive - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, -- cgit v1.2.3 From 76f94a9c77d2fe6f0f02235eea84dae19edd3161 Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Wed, 17 Feb 2016 15:00:39 -0800 Subject: drivers: net: xgene: Add support for Classifier engine Signed-off-by: Iyappan Subramanian Signed-off-by: Khuong Dinh Signed-off-by: Tanmay Inamdar Tested-by: Toan Le Signed-off-by: David S. Miller --- drivers/net/ethernet/apm/xgene/Makefile | 3 +- drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | 357 +++++++++++++++++++++++ drivers/net/ethernet/apm/xgene/xgene_enet_cle.h | 254 ++++++++++++++++ drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 1 + drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 29 +- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 14 + 6 files changed, 649 insertions(+), 9 deletions(-) create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_cle.c create mode 100644 drivers/net/ethernet/apm/xgene/xgene_enet_cle.h (limited to 'drivers') diff --git a/drivers/net/ethernet/apm/xgene/Makefile b/drivers/net/ethernet/apm/xgene/Makefile index 700b5abe5de5..f46321f68315 100644 --- a/drivers/net/ethernet/apm/xgene/Makefile +++ b/drivers/net/ethernet/apm/xgene/Makefile @@ -3,5 +3,6 @@ # xgene-enet-objs := xgene_enet_hw.o xgene_enet_sgmac.o xgene_enet_xgmac.o \ - xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o + xgene_enet_main.o xgene_enet_ring2.o xgene_enet_ethtool.o \ + xgene_enet_cle.o obj-$(CONFIG_NET_XGENE) += xgene-enet.o diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c new file mode 100644 index 000000000000..ff24ca9647ed --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -0,0 +1,357 @@ +/* Applied Micro X-Gene SoC Ethernet Classifier structures + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Authors: Khuong Dinh + * Tanmay Inamdar + * Iyappan Subramanian + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include "xgene_enet_main.h" + +static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, + struct xgene_cle_dbptr *dbptr, u32 *buf) +{ + buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) | + SET_VAL(CLE_DSTQIDL, dbptr->dstqid); + + buf[5] = SET_VAL(CLE_DSTQIDH, (u32)dbptr->dstqid >> CLE_DSTQIDL_LEN) | + SET_VAL(CLE_PRIORITY, dbptr->cle_priority); +} + +static void xgene_cle_kn_to_hw(struct xgene_cle_ptree_kn *kn, u32 *buf) +{ + u32 i, j = 0; + u32 data; + + buf[j++] = SET_VAL(CLE_TYPE, kn->node_type); + for (i = 0; i < kn->num_keys; i++) { + struct xgene_cle_ptree_key *key = &kn->key[i]; + + if (!(i % 2)) { + buf[j] = SET_VAL(CLE_KN_PRIO, key->priority) | + SET_VAL(CLE_KN_RPTR, key->result_pointer); + } else { + data = SET_VAL(CLE_KN_PRIO, key->priority) | + SET_VAL(CLE_KN_RPTR, key->result_pointer); + buf[j++] |= (data << 16); + } + } +} + +static void xgene_cle_dn_to_hw(struct xgene_cle_ptree_ewdn *dn, + u32 *buf, u32 jb) +{ + struct xgene_cle_ptree_branch *br; + u32 i, j = 0; + u32 npp; + + buf[j++] = SET_VAL(CLE_DN_TYPE, dn->node_type) | + SET_VAL(CLE_DN_LASTN, dn->last_node) | + SET_VAL(CLE_DN_HLS, dn->hdr_len_store) | + SET_VAL(CLE_DN_EXT, dn->hdr_extn) | + SET_VAL(CLE_DN_BSTOR, dn->byte_store) | + SET_VAL(CLE_DN_SBSTOR, dn->search_byte_store) | + SET_VAL(CLE_DN_RPTR, dn->result_pointer); + + for (i = 0; i < dn->num_branches; i++) { + br = &dn->branch[i]; + npp = br->next_packet_pointer; + + if ((br->jump_rel == JMP_ABS) && (npp < CLE_PKTRAM_SIZE)) + npp += jb; + + buf[j++] = SET_VAL(CLE_BR_VALID, br->valid) | + SET_VAL(CLE_BR_NPPTR, npp) | + SET_VAL(CLE_BR_JB, br->jump_bw) | + SET_VAL(CLE_BR_JR, br->jump_rel) | + SET_VAL(CLE_BR_OP, br->operation) | + SET_VAL(CLE_BR_NNODE, br->next_node) | + SET_VAL(CLE_BR_NBR, br->next_branch); + + buf[j++] = SET_VAL(CLE_BR_DATA, br->data) | + SET_VAL(CLE_BR_MASK, br->mask); + } +} + +static int xgene_cle_poll_cmd_done(void __iomem *base, + enum xgene_cle_cmd_type cmd) +{ + u32 status, loop = 10; + int ret = -EBUSY; + + while (loop--) { + status = ioread32(base + INDCMD_STATUS); + if (status & cmd) { + ret = 0; + break; + } + usleep_range(1000, 2000); + } + + return ret; +} + +static int xgene_cle_dram_wr(struct xgene_enet_cle *cle, u32 *data, u8 nregs, + u32 index, enum xgene_cle_dram_type type, + enum xgene_cle_cmd_type cmd) +{ + enum xgene_cle_parser parser = cle->active_parser; + void __iomem *base = cle->base; + u32 i, j, ind_addr; + u8 port, nparsers; + int ret = 0; + + /* PTREE_RAM onwards, DRAM regions are common for all parsers */ + nparsers = (type >= PTREE_RAM) ? 
1 : cle->parsers; + + for (i = 0; i < nparsers; i++) { + port = i; + if ((type < PTREE_RAM) && (parser != PARSER_ALL)) + port = parser; + + ind_addr = XGENE_CLE_DRAM(type + (port * 4)) | index; + iowrite32(ind_addr, base + INDADDR); + for (j = 0; j < nregs; j++) + iowrite32(data[j], base + DATA_RAM0 + (j * 4)); + iowrite32(cmd, base + INDCMD); + + ret = xgene_cle_poll_cmd_done(base, cmd); + if (ret) + break; + } + + return ret; +} + +static void xgene_cle_enable_ptree(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + void __iomem *addr, *base = cle->base; + u32 offset = CLE_PORT_OFFSET; + u32 i; + + /* 1G port has to advance 4 bytes and 10G has to advance 8 bytes */ + ptree->start_pkt += cle->jump_bytes; + for (i = 0; i < cle->parsers; i++) { + if (cle->active_parser != PARSER_ALL) + addr = base + cle->active_parser * offset; + else + addr = base + (i * offset); + + iowrite32(ptree->start_node & 0x3fff, addr + SNPTR0); + iowrite32(ptree->start_pkt & 0x1ff, addr + SPPTR0); + } +} + +static int xgene_cle_setup_dbptr(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + u32 buf[CLE_DRAM_REGS]; + u32 i; + int ret; + + memset(buf, 0, sizeof(buf)); + for (i = 0; i < ptree->num_dbptr; i++) { + xgene_cle_dbptr_to_hw(pdata, &ptree->dbptr[i], buf); + ret = xgene_cle_dram_wr(cle, buf, 6, i + ptree->start_dbptr, + DB_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + return 0; +} + +static int xgene_cle_setup_node(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + struct xgene_cle_ptree *ptree = &cle->ptree; + struct xgene_cle_ptree_ewdn *dn = ptree->dn; + struct xgene_cle_ptree_kn *kn = ptree->kn; + u32 buf[CLE_DRAM_REGS]; + int i, j, ret; + + memset(buf, 0, sizeof(buf)); + for (i = 0; i < ptree->num_dn; i++) { + xgene_cle_dn_to_hw(&dn[i], buf, cle->jump_bytes); + ret = xgene_cle_dram_wr(cle, buf, 17, i + ptree->start_node, + PTREE_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + /* continue node index for key node */ + memset(buf, 0, sizeof(buf)); + for (j = i; j < (ptree->num_kn + ptree->num_dn); j++) { + xgene_cle_kn_to_hw(&kn[j - ptree->num_dn], buf); + ret = xgene_cle_dram_wr(cle, buf, 17, j + ptree->start_node, + PTREE_RAM, CLE_CMD_WR); + if (ret) + return ret; + } + + return 0; +} + +static int xgene_cle_setup_ptree(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *cle) +{ + int ret; + + ret = xgene_cle_setup_node(pdata, cle); + if (ret) + return ret; + + ret = xgene_cle_setup_dbptr(pdata, cle); + if (ret) + return ret; + + xgene_cle_enable_ptree(pdata, cle); + + return 0; +} + +static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata, + struct xgene_enet_cle *enet_cle, + struct xgene_cle_dbptr *dbptr, + u32 index, u8 priority) +{ + void __iomem *base = enet_cle->base; + void __iomem *base_addr; + u32 buf[CLE_DRAM_REGS]; + u32 def_cls, offset; + u32 i, j; + + memset(buf, 0, sizeof(buf)); + xgene_cle_dbptr_to_hw(pdata, dbptr, buf); + + for (i = 0; i < enet_cle->parsers; i++) { + if (enet_cle->active_parser != PARSER_ALL) { + offset = enet_cle->active_parser * + CLE_PORT_OFFSET; + } else { + offset = i * CLE_PORT_OFFSET; + } + + base_addr = base + DFCLSRESDB00 + offset; + for (j = 0; j < 6; j++) + iowrite32(buf[j], base_addr + (j * 4)); + + def_cls = ((priority & 0x7) << 10) | (index & 0x3ff); + iowrite32(def_cls, base + DFCLSRESDBPTR0 + offset); + } +} + +static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) +{ + struct 
xgene_enet_cle *enet_cle = &pdata->cle; + struct xgene_cle_dbptr dbptr[DB_MAX_PTRS]; + u32 def_qid, def_fpsel, pool_id; + struct xgene_cle_ptree *ptree; + struct xgene_cle_ptree_kn kn; + struct xgene_cle_ptree_ewdn ptree_dn[] = { + { + /* PKT_TYPE_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 0, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 1, + .branch = { + { + /* Allow all packet type */ + .valid = 0, + .next_packet_pointer = 0, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* LAST NODE */ + .node_type = EWDN, + .last_node = 1, + .hdr_len_store = 0, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 1, + .branch = { + { + .valid = 0, + .next_packet_pointer = 0, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = MAX_NODES, + .next_branch = 0, + .data = 0, + .mask = 0xffff + } + } + } + }; + + ptree = &enet_cle->ptree; + ptree->start_pkt = 12; /* Ethertype */ + + def_qid = xgene_enet_dst_ring_num(pdata->rx_ring); + pool_id = pdata->rx_ring->buf_pool->id; + def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; + + memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS); + dbptr[DB_RES_ACCEPT].fpsel = def_fpsel; + dbptr[DB_RES_ACCEPT].dstqid = def_qid; + dbptr[DB_RES_ACCEPT].cle_priority = 1; + + dbptr[DB_RES_DEF].fpsel = def_fpsel; + dbptr[DB_RES_DEF].dstqid = def_qid; + dbptr[DB_RES_DEF].cle_priority = 7; + xgene_cle_setup_def_dbptr(pdata, enet_cle, &dbptr[DB_RES_DEF], + DB_RES_ACCEPT, 7); + + dbptr[DB_RES_DROP].drop = 1; + + memset(&kn, 0, sizeof(kn)); + kn.node_type = KN; + kn.num_keys = 1; + kn.key[0].priority = 0; + kn.key[0].result_pointer = DB_RES_ACCEPT; + + ptree->dn = ptree_dn; + ptree->kn = &kn; + ptree->dbptr = dbptr; + ptree->num_dn = MAX_NODES; + ptree->num_kn = 1; + ptree->num_dbptr = DB_MAX_PTRS; + + return xgene_cle_setup_ptree(pdata, enet_cle); +} + +struct xgene_cle_ops xgene_cle3in_ops = { + .cle_init = xgene_enet_cle_init, +}; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h new file mode 100644 index 000000000000..1db2fd769602 --- /dev/null +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -0,0 +1,254 @@ +/* Applied Micro X-Gene SoC Ethernet Classifier structures + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Authors: Khuong Dinh + * Tanmay Inamdar + * Iyappan Subramanian + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#ifndef __XGENE_ENET_CLE_H__ +#define __XGENE_ENET_CLE_H__ + +#include +#include + +/* Register offsets */ +#define INDADDR 0x04 +#define INDCMD 0x08 +#define INDCMD_STATUS 0x0c +#define DATA_RAM0 0x10 +#define SNPTR0 0x0100 +#define SPPTR0 0x0104 +#define DFCLSRESDBPTR0 0x0108 +#define DFCLSRESDB00 0x010c + +#define CLE_CMD_TO 10 /* ms */ +#define CLE_PKTRAM_SIZE 256 /* bytes */ +#define CLE_PORT_OFFSET 0x200 +#define CLE_DRAM_REGS 17 + +#define CLE_DN_TYPE_LEN 2 +#define CLE_DN_TYPE_POS 0 +#define CLE_DN_LASTN_LEN 1 +#define CLE_DN_LASTN_POS 2 +#define CLE_DN_HLS_LEN 1 +#define CLE_DN_HLS_POS 3 +#define CLE_DN_EXT_LEN 2 +#define CLE_DN_EXT_POS 4 +#define CLE_DN_BSTOR_LEN 2 +#define CLE_DN_BSTOR_POS 6 +#define CLE_DN_SBSTOR_LEN 2 +#define CLE_DN_SBSTOR_POS 8 +#define CLE_DN_RPTR_LEN 12 +#define CLE_DN_RPTR_POS 12 + +#define CLE_BR_VALID_LEN 1 +#define CLE_BR_VALID_POS 0 +#define CLE_BR_NPPTR_LEN 9 +#define CLE_BR_NPPTR_POS 1 +#define CLE_BR_JB_LEN 1 +#define CLE_BR_JB_POS 10 +#define CLE_BR_JR_LEN 1 +#define CLE_BR_JR_POS 11 +#define CLE_BR_OP_LEN 3 +#define CLE_BR_OP_POS 12 +#define CLE_BR_NNODE_LEN 9 +#define CLE_BR_NNODE_POS 15 +#define CLE_BR_NBR_LEN 5 +#define CLE_BR_NBR_POS 24 + +#define CLE_BR_DATA_LEN 16 +#define CLE_BR_DATA_POS 0 +#define CLE_BR_MASK_LEN 16 +#define CLE_BR_MASK_POS 16 + +#define CLE_KN_PRIO_POS 0 +#define CLE_KN_PRIO_LEN 3 +#define CLE_KN_RPTR_POS 3 +#define CLE_KN_RPTR_LEN 10 +#define CLE_TYPE_POS 0 +#define CLE_TYPE_LEN 2 + +#define CLE_DSTQIDL_POS 25 +#define CLE_DSTQIDL_LEN 7 +#define CLE_DSTQIDH_POS 0 +#define CLE_DSTQIDH_LEN 5 +#define CLE_FPSEL_POS 21 +#define CLE_FPSEL_LEN 4 +#define CLE_PRIORITY_POS 5 +#define CLE_PRIORITY_LEN 3 + +#define JMP_ABS 0 +#define JMP_REL 1 +#define JMP_FW 0 +#define JMP_BW 1 + +enum xgene_cle_ptree_nodes { + PKT_TYPE_NODE, + LAST_NODE, + MAX_NODES +}; + +enum xgene_cle_byte_store { + NO_BYTE, + FIRST_BYTE, + SECOND_BYTE, + BOTH_BYTES +}; + +/* Preclassification operation types */ +enum xgene_cle_node_type { + INV, + KN, + EWDN, + RES_NODE +}; + +/* Preclassification operation types */ +enum xgene_cle_op_type { + EQT, + NEQT, + LTEQT, + GTEQT, + AND, + NAND +}; + +enum xgene_cle_parser { + PARSER0, + PARSER1, + PARSER2, + PARSER_ALL +}; + +#define XGENE_CLE_DRAM(type) (((type) & 0xf) << 28) +enum xgene_cle_dram_type { + PKT_RAM, + PTREE_RAM = 0xc, + AVL_RAM, + DB_RAM +}; + +enum xgene_cle_cmd_type { + CLE_CMD_WR = 1, + CLE_CMD_RD = 2, + CLE_CMD_AVL_ADD = 8, + CLE_CMD_AVL_DEL = 16, + CLE_CMD_AVL_SRCH = 32 +}; + +enum xgene_cle_ptree_dbptrs { + DB_RES_DROP, + DB_RES_DEF, + DB_RES_ACCEPT, + DB_MAX_PTRS +}; + +struct xgene_cle_ptree_branch { + bool valid; + u16 next_packet_pointer; + bool jump_bw; + bool jump_rel; + u8 operation; + u16 next_node; + u8 next_branch; + u16 data; + u16 mask; +}; + +struct xgene_cle_ptree_ewdn { + u8 node_type; + bool last_node; + bool hdr_len_store; + u8 hdr_extn; + u8 byte_store; + u8 search_byte_store; + u16 result_pointer; + u8 num_branches; + struct xgene_cle_ptree_branch branch[6]; +}; + +struct xgene_cle_ptree_key { + u8 priority; + u16 result_pointer; +}; + +struct xgene_cle_ptree_kn { + u8 node_type; + u8 num_keys; + struct xgene_cle_ptree_key key[32]; +}; + +struct xgene_cle_dbptr { + u8 split_boundary; + u8 mirror_nxtfpsel; + u8 mirror_fpsel; + u16 mirror_dstqid; + u8 drop; + u8 mirror; + u8 hdr_data_split; + u64 hopinfomsbs; + u8 DR; + u8 HR; + u64 hopinfomlsbs; + u16 h0enq_num; + u8 h0fpsel; + u8 nxtfpsel; + u8 fpsel; + u16 dstqid; + u8 cle_priority; + u8 cle_flowgroup; + u8 
cle_perflow; + u8 cle_insert_timestamp; + u8 stash; + u8 in; + u8 perprioen; + u8 perflowgroupen; + u8 perflowen; + u8 selhash; + u8 selhdrext; + u8 mirror_nxtfpsel_msb; + u8 mirror_fpsel_msb; + u8 hfpsel_msb; + u8 nxtfpsel_msb; + u8 fpsel_msb; +}; + +struct xgene_cle_ptree { + struct xgene_cle_ptree_ewdn *dn; + struct xgene_cle_ptree_kn *kn; + struct xgene_cle_dbptr *dbptr; + u32 num_dn; + u32 num_kn; + u32 num_dbptr; + u32 start_node; + u32 start_pkt; + u32 start_dbptr; +}; + +struct xgene_enet_cle { + void __iomem *base; + struct xgene_cle_ptree ptree; + enum xgene_cle_parser active_parser; + u32 parsers; + u32 max_nodes; + u32 max_dbptrs; + u32 jump_bytes; +}; + +extern struct xgene_cle_ops xgene_cle3in_ops; + +#endif /* __XGENE_ENET_CLE_H__ */ diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8a9091039ab4..45725ec7cfed 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -101,6 +101,7 @@ enum xgene_enet_rm { #define MAC_OFFSET 0x30 #define BLOCK_ETH_CSR_OFFSET 0x2000 +#define BLOCK_ETH_CLE_CSR_OFFSET 0x6000 #define BLOCK_ETH_RING_IF_OFFSET 0x9000 #define BLOCK_ETH_CLKRST_CSR_OFFSET 0xc000 #define BLOCK_ETH_DIAG_CSR_OFFSET 0xD000 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 5eb9b20c0eea..0bf3924a5ada 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -93,13 +93,6 @@ static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool, return 0; } -static u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) -{ - struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); - - return ((u16)pdata->rm << 10) | ring->num; -} - static u8 xgene_enet_hdr_len(const void *data) { const struct ethhdr *eth = data; @@ -1278,6 +1271,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) else base_addr = pdata->base_addr; pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET; + pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET; pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET; pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET; if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII || @@ -1298,6 +1292,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) { + struct xgene_enet_cle *enet_cle = &pdata->cle; struct net_device *ndev = pdata->ndev; struct xgene_enet_desc_ring *buf_pool; u16 dst_ring_num; @@ -1323,7 +1318,24 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) } dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); - pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + /* Initialize and Enable PreClassifier Tree */ + enet_cle->max_nodes = 512; + enet_cle->max_dbptrs = 1024; + enet_cle->parsers = 3; + enet_cle->active_parser = PARSER_ALL; + enet_cle->ptree.start_node = 0; + enet_cle->ptree.start_dbptr = 0; + enet_cle->jump_bytes = 8; + ret = pdata->cle_ops->cle_init(pdata); + if (ret) { + netdev_err(ndev, "Preclass Tree init error\n"); + return ret; + } + } else { + pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id); + } + pdata->mac_ops->init(pdata); return ret; @@ -1345,6 +1357,7 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) default: pdata->mac_ops = &xgene_xgmac_ops; 
pdata->port_ops = &xgene_xgport_ops; + pdata->cle_ops = &xgene_cle3in_ops; pdata->rm = RM0; break; } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 248dfc40a761..05365c1b12db 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -36,6 +36,7 @@ #include #include #include "xgene_enet_hw.h" +#include "xgene_enet_cle.h" #include "xgene_enet_ring2.h" #define XGENE_DRV_VERSION "v1.0" @@ -145,6 +146,10 @@ struct xgene_ring_ops { u32 (*len)(struct xgene_enet_desc_ring *); }; +struct xgene_cle_ops { + int (*cle_init)(struct xgene_enet_pdata *pdata); +}; + /* ethernet private data */ struct xgene_enet_pdata { struct net_device *ndev; @@ -174,10 +179,12 @@ struct xgene_enet_pdata { void __iomem *ring_cmd_addr; int phy_mode; enum xgene_enet_rm rm; + struct xgene_enet_cle cle; struct rtnl_link_stats64 stats; const struct xgene_mac_ops *mac_ops; const struct xgene_port_ops *port_ops; struct xgene_ring_ops *ring_ops; + struct xgene_cle_ops *cle_ops; struct delayed_work link_work; u32 port_id; u8 cpu_bufnum; @@ -229,6 +236,13 @@ static inline struct device *ndev_to_dev(struct net_device *ndev) return ndev->dev.parent; } +static inline u16 xgene_enet_dst_ring_num(struct xgene_enet_desc_ring *ring) +{ + struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + + return ((u16)pdata->rm << 10) | ring->num; +} + void xgene_enet_set_ethtool_ops(struct net_device *netdev); #endif /* __XGENE_ENET_MAIN_H__ */ -- cgit v1.2.3 From fc4262d2aa9aa26ec4a29bf274bcfca569656e73 Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Wed, 17 Feb 2016 15:00:40 -0800 Subject: drivers: net: xgene: Add support for RSS Signed-off-by: Iyappan Subramanian Signed-off-by: Khuong Dinh Signed-off-by: Tanmay Inamdar Tested-by: Toan Le Signed-off-by: David S. 
Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | 386 +++++++++++++++++++++++- drivers/net/ethernet/apm/xgene/xgene_enet_cle.h | 41 +++ 2 files changed, 422 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c index ff24ca9647ed..c00749727e6a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -21,6 +21,25 @@ #include "xgene_enet_main.h" +/* interfaces to convert structures to HW recognized bit formats */ +static void xgene_cle_sband_to_hw(u8 frag, enum xgene_cle_prot_version ver, + enum xgene_cle_prot_type type, u32 len, + u32 *reg) +{ + *reg = SET_VAL(SB_IPFRAG, frag) | + SET_VAL(SB_IPPROT, type) | + SET_VAL(SB_IPVER, ver) | + SET_VAL(SB_HDRLEN, len); +} + +static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel, + u32 nfpsel, u32 *idt_reg) +{ + *idt_reg = SET_VAL(IDT_DSTQID, dstqid) | + SET_VAL(IDT_FPSEL, fpsel) | + SET_VAL(IDT_NFPSEL, nfpsel); +} + static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata, struct xgene_cle_dbptr *dbptr, u32 *buf) { @@ -257,29 +276,372 @@ static void xgene_cle_setup_def_dbptr(struct xgene_enet_pdata *pdata, } } +static int xgene_cle_set_rss_sband(struct xgene_enet_cle *cle) +{ + u32 idx = CLE_PKTRAM_SIZE / sizeof(u32); + u32 mac_hdr_len = ETH_HLEN; + u32 sband, reg = 0; + u32 ipv4_ihl = 5; + u32 hdr_len; + int ret; + + /* Sideband: IPV4/TCP packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_TCP, hdr_len, ®); + sband = reg; + + /* Sideband: IPv4/UDP packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(1, XGENE_CLE_IPV4, XGENE_CLE_UDP, hdr_len, ®); + sband |= (reg << 16); + + ret = xgene_cle_dram_wr(cle, &sband, 1, idx, PKT_RAM, CLE_CMD_WR); + if (ret) + return ret; + + /* Sideband: IPv4/RAW packets */ + hdr_len = (mac_hdr_len << 5) | ipv4_ihl; + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, + hdr_len, ®); + sband = reg; + + /* Sideband: Ethernet II/RAW packets */ + hdr_len = (mac_hdr_len << 5); + xgene_cle_sband_to_hw(0, XGENE_CLE_IPV4, XGENE_CLE_OTHER, + hdr_len, ®); + sband |= (reg << 16); + + ret = xgene_cle_dram_wr(cle, &sband, 1, idx + 1, PKT_RAM, CLE_CMD_WR); + if (ret) + return ret; + + return 0; +} + +static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle) +{ + u32 secret_key_ipv4[4]; /* 16 Bytes*/ + int ret = 0; + + get_random_bytes(secret_key_ipv4, 16); + ret = xgene_cle_dram_wr(cle, secret_key_ipv4, 4, 0, + RSS_IPV4_HASH_SKEY, CLE_CMD_WR); + return ret; +} + +static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata) +{ + u32 fpsel, dstqid, nfpsel, idt_reg; + int i, ret = 0; + u16 pool_id; + + for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) { + pool_id = pdata->rx_ring->buf_pool->id; + fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; + dstqid = xgene_enet_dst_ring_num(pdata->rx_ring); + nfpsel = 0; + idt_reg = 0; + + xgene_cle_idt_to_hw(dstqid, fpsel, nfpsel, &idt_reg); + ret = xgene_cle_dram_wr(&pdata->cle, &idt_reg, 1, i, + RSS_IDT, CLE_CMD_WR); + if (ret) + return ret; + } + + ret = xgene_cle_set_rss_skeys(&pdata->cle); + if (ret) + return ret; + + return 0; +} + +static int xgene_cle_setup_rss(struct xgene_enet_pdata *pdata) +{ + struct xgene_enet_cle *cle = &pdata->cle; + void __iomem *base = cle->base; + u32 offset, val = 0; + int i, ret = 0; + + offset = CLE_PORT_OFFSET; + for (i = 0; i < cle->parsers; i++) { + if (cle->active_parser != PARSER_ALL) 
+ offset = cle->active_parser * CLE_PORT_OFFSET; + else + offset = i * CLE_PORT_OFFSET; + + /* enable RSS */ + val = (RSS_IPV4_12B << 1) | 0x1; + writel(val, base + RSS_CTRL0 + offset); + } + + /* setup sideband data */ + ret = xgene_cle_set_rss_sband(cle); + if (ret) + return ret; + + /* setup indirection table */ + ret = xgene_cle_set_rss_idt(pdata); + if (ret) + return ret; + + return 0; +} + static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) { struct xgene_enet_cle *enet_cle = &pdata->cle; struct xgene_cle_dbptr dbptr[DB_MAX_PTRS]; + struct xgene_cle_ptree_branch *br; u32 def_qid, def_fpsel, pool_id; struct xgene_cle_ptree *ptree; struct xgene_cle_ptree_kn kn; + int ret; struct xgene_cle_ptree_ewdn ptree_dn[] = { { /* PKT_TYPE_NODE */ .node_type = EWDN, .last_node = 0, - .hdr_len_store = 0, + .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = NO_BYTE, .result_pointer = DB_RES_DROP, - .num_branches = 1, + .num_branches = 2, .branch = { { - /* Allow all packet type */ + /* IPV4 */ .valid = 0, - .next_packet_pointer = 0, + .next_packet_pointer = 22, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = PKT_PROT_NODE, + .next_branch = 0, + .data = 0x8, + .mask = 0xffff + }, + { + .valid = 0, + .next_packet_pointer = 262, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + }, + }, + { + /* PKT_PROT_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = NO_BYTE, + .result_pointer = DB_RES_DROP, + .num_branches = 3, + .branch = { + { + /* TCP */ + .valid = 1, + .next_packet_pointer = 26, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 0, + .data = 0x0600, + .mask = 0xffff + }, + { + /* UDP */ + .valid = 1, + .next_packet_pointer = 26, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 0, + .data = 0x1100, + .mask = 0xffff + }, + { + .valid = 0, + .next_packet_pointer = 260, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* RSS_IPV4_TCP_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = BOTH_BYTES, + .result_pointer = DB_RES_DROP, + .num_branches = 6, + .branch = { + { + /* SRC IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 28, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 1, + .data = 0x0, + .mask = 0xffff + }, + { + /* SRC IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 30, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 2, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 32, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 3, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 34, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 4, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP SRC Port */ + .valid = 0, + .next_packet_pointer = 36, + .jump_bw = 
JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_TCP_NODE, + .next_branch = 5, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP DST Port */ + .valid = 0, + .next_packet_pointer = 256, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = LAST_NODE, + .next_branch = 0, + .data = 0x0, + .mask = 0xffff + } + } + }, + { + /* RSS_IPV4_UDP_NODE */ + .node_type = EWDN, + .last_node = 0, + .hdr_len_store = 1, + .hdr_extn = NO_BYTE, + .byte_store = NO_BYTE, + .search_byte_store = BOTH_BYTES, + .result_pointer = DB_RES_DROP, + .num_branches = 6, + .branch = { + { + /* SRC IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 28, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 1, + .data = 0x0, + .mask = 0xffff + }, + { + /* SRC IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 30, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 2, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B01 */ + .valid = 0, + .next_packet_pointer = 32, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 3, + .data = 0x0, + .mask = 0xffff + }, + { + /* DST IPV4 B23 */ + .valid = 0, + .next_packet_pointer = 34, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 4, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP SRC Port */ + .valid = 0, + .next_packet_pointer = 36, + .jump_bw = JMP_FW, + .jump_rel = JMP_ABS, + .operation = EQT, + .next_node = RSS_IPV4_UDP_NODE, + .next_branch = 5, + .data = 0x0, + .mask = 0xffff + }, + { + /* TCP DST Port */ + .valid = 0, + .next_packet_pointer = 256, .jump_bw = JMP_FW, .jump_rel = JMP_ABS, .operation = EQT, @@ -294,7 +656,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) /* LAST NODE */ .node_type = EWDN, .last_node = 1, - .hdr_len_store = 0, + .hdr_len_store = 1, .hdr_extn = NO_BYTE, .byte_store = NO_BYTE, .search_byte_store = NO_BYTE, @@ -318,6 +680,20 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) ptree = &enet_cle->ptree; ptree->start_pkt = 12; /* Ethertype */ + if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { + ret = xgene_cle_setup_rss(pdata); + if (ret) { + netdev_err(pdata->ndev, "RSS initialization failed\n"); + return ret; + } + } else { + br = &ptree_dn[PKT_PROT_NODE].branch[0]; + br->valid = 0; + br->next_packet_pointer = 260; + br->next_node = LAST_NODE; + br->data = 0x0000; + br->mask = 0xffff; + } def_qid = xgene_enet_dst_ring_num(pdata->rx_ring); pool_id = pdata->rx_ring->buf_pool->id; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h index 1db2fd769602..29a17abdd828 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h @@ -34,6 +34,7 @@ #define SPPTR0 0x0104 #define DFCLSRESDBPTR0 0x0108 #define DFCLSRESDB00 0x010c +#define RSS_CTRL0 0x0000013c #define CLE_CMD_TO 10 /* ms */ #define CLE_PKTRAM_SIZE 256 /* bytes */ @@ -98,6 +99,9 @@ enum xgene_cle_ptree_nodes { PKT_TYPE_NODE, + PKT_PROT_NODE, + RSS_IPV4_TCP_NODE, + RSS_IPV4_UDP_NODE, LAST_NODE, MAX_NODES }; @@ -137,6 +141,8 @@ enum xgene_cle_parser { #define XGENE_CLE_DRAM(type) (((type) & 0xf) << 28) enum xgene_cle_dram_type { PKT_RAM, + RSS_IDT, + RSS_IPV4_HASH_SKEY, PTREE_RAM = 0xc, AVL_RAM, DB_RAM @@ -150,6 +156,22 @@ enum xgene_cle_cmd_type { 
CLE_CMD_AVL_SRCH = 32 }; +enum xgene_cle_ipv4_rss_hashtype { + RSS_IPV4_8B, + RSS_IPV4_12B, +}; + +enum xgene_cle_prot_type { + XGENE_CLE_TCP, + XGENE_CLE_UDP, + XGENE_CLE_ESP, + XGENE_CLE_OTHER +}; + +enum xgene_cle_prot_version { + XGENE_CLE_IPV4, +}; + enum xgene_cle_ptree_dbptrs { DB_RES_DROP, DB_RES_DEF, @@ -157,6 +179,25 @@ enum xgene_cle_ptree_dbptrs { DB_MAX_PTRS }; +/* RSS sideband signal info */ +#define SB_IPFRAG_POS 0 +#define SB_IPFRAG_LEN 1 +#define SB_IPPROT_POS 1 +#define SB_IPPROT_LEN 2 +#define SB_IPVER_POS 3 +#define SB_IPVER_LEN 1 +#define SB_HDRLEN_POS 4 +#define SB_HDRLEN_LEN 12 + +/* RSS indirection table */ +#define XGENE_CLE_IDT_ENTRIES 128 +#define IDT_DSTQID_POS 0 +#define IDT_DSTQID_LEN 12 +#define IDT_FPSEL_POS 12 +#define IDT_FPSEL_LEN 4 +#define IDT_NFPSEL_POS 16 +#define IDT_NFPSEL_LEN 4 + struct xgene_cle_ptree_branch { bool valid; u16 next_packet_pointer; -- cgit v1.2.3 From 107dec2749fed1b4172f77c091961b15221419db Mon Sep 17 00:00:00 2001 From: Iyappan Subramanian Date: Wed, 17 Feb 2016 15:00:41 -0800 Subject: drivers: net: xgene: Add support for multiple queues Signed-off-by: Iyappan Subramanian Signed-off-by: Khuong Dinh Signed-off-by: Tanmay Inamdar Tested-by: Toan Le Signed-off-by: David S. Miller --- drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | 11 +- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 12 + drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 5 + drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 453 +++++++++++++--------- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 21 +- drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c | 12 + 6 files changed, 320 insertions(+), 194 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c index c00749727e6a..b212488606da 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c @@ -331,14 +331,15 @@ static int xgene_cle_set_rss_skeys(struct xgene_enet_cle *cle) static int xgene_cle_set_rss_idt(struct xgene_enet_pdata *pdata) { - u32 fpsel, dstqid, nfpsel, idt_reg; + u32 fpsel, dstqid, nfpsel, idt_reg, idx; int i, ret = 0; u16 pool_id; for (i = 0; i < XGENE_CLE_IDT_ENTRIES; i++) { - pool_id = pdata->rx_ring->buf_pool->id; + idx = i % pdata->rxq_cnt; + pool_id = pdata->rx_ring[idx]->buf_pool->id; fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; - dstqid = xgene_enet_dst_ring_num(pdata->rx_ring); + dstqid = xgene_enet_dst_ring_num(pdata->rx_ring[idx]); nfpsel = 0; idt_reg = 0; @@ -695,8 +696,8 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata) br->mask = 0xffff; } - def_qid = xgene_enet_dst_ring_num(pdata->rx_ring); - pool_id = pdata->rx_ring->buf_pool->id; + def_qid = xgene_enet_dst_ring_num(pdata->rx_ring[0]); + pool_id = pdata->rx_ring[0]->buf_pool->id; def_fpsel = xgene_enet_ring_bufnum(pool_id) - 0x20; memset(dbptr, 0, sizeof(struct xgene_cle_dbptr) * DB_MAX_PTRS); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index db55c9f6e8e1..39e081a70f5b 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -204,6 +204,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } +static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) +{ + u32 data = 0x7777; + + xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); + 
xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); +} + void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, struct xgene_enet_pdata *pdata, enum xgene_enet_err_code status) @@ -892,4 +903,5 @@ struct xgene_ring_ops xgene_ring1_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, + .coalesce = xgene_enet_setup_coalescing, }; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 45725ec7cfed..ba7da98af2ef 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -54,6 +54,11 @@ enum xgene_enet_rm { #define IS_BUFFER_POOL BIT(20) #define PREFETCH_BUF_EN BIT(21) #define CSR_RING_ID_BUF 0x000c +#define CSR_PBM_COAL 0x0014 +#define CSR_PBM_CTICK1 0x001c +#define CSR_PBM_CTICK2 0x0020 +#define CSR_THRESHOLD0_SET1 0x0030 +#define CSR_THRESHOLD1_SET1 0x0034 #define CSR_RING_NE_INT_MODE 0x017c #define CSR_RING_CONFIG 0x006c #define CSR_RING_WR_BASE 0x0070 diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 0bf3924a5ada..8d4c1ad2fc60 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -182,7 +182,6 @@ static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring, static u64 xgene_enet_work_msg(struct sk_buff *skb) { struct net_device *ndev = skb->dev; - struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct iphdr *iph; u8 l3hlen = 0, l4hlen = 0; u8 ethhdr, proto = 0, csum_enable = 0; @@ -228,10 +227,6 @@ static u64 xgene_enet_work_msg(struct sk_buff *skb) if (!mss || ((skb->len - hdr_len) <= mss)) goto out; - if (mss != pdata->mss) { - pdata->mss = mss; - pdata->mac_ops->set_mss(pdata); - } hopinfo |= SET_BIT(ET); } } else if (iph->protocol == IPPROTO_UDP) { @@ -413,7 +408,7 @@ out: raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) | SET_VAL(USERINFO, tx_ring->tail)); tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb; - pdata->tx_level += count; + pdata->tx_level[tx_ring->cp_ring->index] += count; tx_ring->tail = tail; return count; @@ -423,15 +418,17 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); - struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring; - u32 tx_level = pdata->tx_level; + struct xgene_enet_desc_ring *tx_ring; + int index = skb->queue_mapping; + u32 tx_level = pdata->tx_level[index]; int count; - if (tx_level < pdata->txc_level) - tx_level += ((typeof(pdata->tx_level))~0U); + tx_ring = pdata->tx_ring[index]; + if (tx_level < pdata->txc_level[index]) + tx_level += ((typeof(pdata->tx_level[index]))~0U); - if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) { - netif_stop_queue(ndev); + if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) { + netif_stop_subqueue(ndev, index); return NETDEV_TX_BUSY; } @@ -529,7 +526,8 @@ static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc) static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, int budget) { - struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev); + struct net_device *ndev = ring->ndev; + struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct xgene_enet_raw_desc *raw_desc, *exp_desc; u16 head = ring->head; u16 slots = ring->slots - 1; @@ -573,7 +571,7 @@ static 
int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, desc_count++; processed++; if (is_completion) - pdata->txc_level += desc_count; + pdata->txc_level[ring->index] += desc_count; if (ret) break; @@ -583,8 +581,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring, pdata->ring_ops->wr_cmd(ring, -count); ring->head = head; - if (netif_queue_stopped(ring->ndev)) - netif_start_queue(ring->ndev); + if (__netif_subqueue_stopped(ndev, ring->index)) + netif_start_subqueue(ndev, ring->index); } return processed; @@ -609,8 +607,16 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget) static void xgene_enet_timeout(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); + struct netdev_queue *txq; + int i; pdata->mac_ops->reset(pdata); + + for (i = 0; i < pdata->txq_cnt; i++) { + txq = netdev_get_tx_queue(ndev, i); + txq->trans_start = jiffies; + netif_tx_start_queue(txq); + } } static int xgene_enet_register_irq(struct net_device *ndev) @@ -618,17 +624,21 @@ static int xgene_enet_register_irq(struct net_device *ndev) struct xgene_enet_pdata *pdata = netdev_priv(ndev); struct device *dev = ndev_to_dev(ndev); struct xgene_enet_desc_ring *ring; - int ret; + int ret = 0, i; - ring = pdata->rx_ring; - irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); - ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, - IRQF_SHARED, ring->irq_name, ring); - if (ret) - netdev_err(ndev, "Failed to request irq %s\n", ring->irq_name); + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, + IRQF_SHARED, ring->irq_name, ring); + if (ret) { + netdev_err(ndev, "Failed to request irq %s\n", + ring->irq_name); + } + } - if (pdata->cq_cnt) { - ring = pdata->tx_ring->cp_ring; + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); @@ -646,15 +656,19 @@ static void xgene_enet_free_irq(struct net_device *ndev) struct xgene_enet_pdata *pdata; struct xgene_enet_desc_ring *ring; struct device *dev; + int i; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); - ring = pdata->rx_ring; - irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); - devm_free_irq(dev, ring->irq, ring); - if (pdata->cq_cnt) { - ring = pdata->tx_ring->cp_ring; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); + } + + for (i = 0; i < pdata->cq_cnt; i++) { + ring = pdata->tx_ring[i]->cp_ring; irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); devm_free_irq(dev, ring->irq, ring); } @@ -663,12 +677,15 @@ static void xgene_enet_free_irq(struct net_device *ndev) static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - napi_enable(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + napi_enable(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; napi_enable(napi); } } @@ -676,12 +693,15 @@ static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - 
napi = &pdata->rx_ring->napi; - napi_disable(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + napi_disable(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; napi_disable(napi); } } @@ -692,6 +712,14 @@ static int xgene_enet_open(struct net_device *ndev) const struct xgene_mac_ops *mac_ops = pdata->mac_ops; int ret; + ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt); + if (ret) + return ret; + + ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt); + if (ret) + return ret; + mac_ops->tx_enable(pdata); mac_ops->rx_enable(pdata); @@ -714,6 +742,7 @@ static int xgene_enet_close(struct net_device *ndev) { struct xgene_enet_pdata *pdata = netdev_priv(ndev); const struct xgene_mac_ops *mac_ops = pdata->mac_ops; + int i; netif_stop_queue(ndev); @@ -727,7 +756,8 @@ static int xgene_enet_close(struct net_device *ndev) xgene_enet_free_irq(ndev); xgene_enet_napi_disable(pdata); - xgene_enet_process_ring(pdata->rx_ring, -1); + for (i = 0; i < pdata->rxq_cnt; i++) + xgene_enet_process_ring(pdata->rx_ring[i], -1); return 0; } @@ -747,18 +777,26 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring) static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata) { struct xgene_enet_desc_ring *buf_pool; + struct xgene_enet_desc_ring *ring; + int i; - if (pdata->tx_ring) { - xgene_enet_delete_ring(pdata->tx_ring); - pdata->tx_ring = NULL; + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + xgene_enet_delete_ring(ring); + pdata->tx_ring[i] = NULL; + } } - if (pdata->rx_ring) { - buf_pool = pdata->rx_ring->buf_pool; - xgene_enet_delete_bufpool(buf_pool); - xgene_enet_delete_ring(buf_pool); - xgene_enet_delete_ring(pdata->rx_ring); - pdata->rx_ring = NULL; + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + buf_pool = ring->buf_pool; + xgene_enet_delete_bufpool(buf_pool); + xgene_enet_delete_ring(buf_pool); + xgene_enet_delete_ring(ring); + pdata->rx_ring[i] = NULL; + } } } @@ -813,24 +851,29 @@ static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata) { struct device *dev = &pdata->pdev->dev; struct xgene_enet_desc_ring *ring; + int i; - ring = pdata->tx_ring; - if (ring) { - if (ring->cp_ring && ring->cp_ring->cp_skb) - devm_kfree(dev, ring->cp_ring->cp_skb); - if (ring->cp_ring && pdata->cq_cnt) - xgene_enet_free_desc_ring(ring->cp_ring); - xgene_enet_free_desc_ring(ring); - } - - ring = pdata->rx_ring; - if (ring) { - if (ring->buf_pool) { - if (ring->buf_pool->rx_skb) - devm_kfree(dev, ring->buf_pool->rx_skb); - xgene_enet_free_desc_ring(ring->buf_pool); + for (i = 0; i < pdata->txq_cnt; i++) { + ring = pdata->tx_ring[i]; + if (ring) { + if (ring->cp_ring && ring->cp_ring->cp_skb) + devm_kfree(dev, ring->cp_ring->cp_skb); + if (ring->cp_ring && pdata->cq_cnt) + xgene_enet_free_desc_ring(ring->cp_ring); + xgene_enet_free_desc_ring(ring); + } + } + + for (i = 0; i < pdata->rxq_cnt; i++) { + ring = pdata->rx_ring[i]; + if (ring) { + if (ring->buf_pool) { + if (ring->buf_pool->rx_skb) + devm_kfree(dev, ring->buf_pool->rx_skb); + xgene_enet_free_desc_ring(ring->buf_pool); + } + xgene_enet_free_desc_ring(ring); } - xgene_enet_free_desc_ring(ring); } } @@ -943,104 +986,120 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) u8 bp_bufnum = pdata->bp_bufnum; u16 ring_num = pdata->ring_num; u16 ring_id; - int ret, size; - - /* allocate rx 
descriptor ring */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); - rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_16KB, ring_id); - if (!rx_ring) { - ret = -ENOMEM; - goto err; - } - - /* allocate buffer pool for receiving packets */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); - buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_2KB, ring_id); - if (!buf_pool) { - ret = -ENOMEM; - goto err; - } + int i, ret, size; - rx_ring->nbufpool = NUM_BUFPOOL; - rx_ring->buf_pool = buf_pool; - rx_ring->irq = pdata->rx_irq; - if (!pdata->cq_cnt) { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", - ndev->name); - } else { - snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx", ndev->name); - } - buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, - sizeof(struct sk_buff *), GFP_KERNEL); - if (!buf_pool->rx_skb) { - ret = -ENOMEM; - goto err; - } - - buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); - rx_ring->buf_pool = buf_pool; - pdata->rx_ring = rx_ring; + for (i = 0; i < pdata->rxq_cnt; i++) { + /* allocate rx descriptor ring */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); + rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!rx_ring) { + ret = -ENOMEM; + goto err; + } - /* allocate tx descriptor ring */ - owner = xgene_derive_ring_owner(pdata); - ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); - tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, - RING_CFGSIZE_16KB, ring_id); - if (!tx_ring) { - ret = -ENOMEM; - goto err; - } + /* allocate buffer pool for receiving packets */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++); + buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_2KB, + ring_id); + if (!buf_pool) { + ret = -ENOMEM; + goto err; + } - size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; - tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, &dma_exp_bufs, + rx_ring->nbufpool = NUM_BUFPOOL; + rx_ring->buf_pool = buf_pool; + rx_ring->irq = pdata->irqs[i]; + if (!pdata->cq_cnt) { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc", + ndev->name); + } else { + snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d", + ndev->name, i); + } + buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots, + sizeof(struct sk_buff *), GFP_KERNEL); - if (!tx_ring->exp_bufs) { - ret = -ENOMEM; - goto err; - } + if (!buf_pool->rx_skb) { + ret = -ENOMEM; + goto err; + } - pdata->tx_ring = tx_ring; + buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool); + rx_ring->buf_pool = buf_pool; + pdata->rx_ring[i] = rx_ring; + } - if (!pdata->cq_cnt) { - cp_ring = pdata->rx_ring; - } else { - /* allocate tx completion descriptor ring */ - ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++); - cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + for (i = 0; i < pdata->txq_cnt; i++) { + /* allocate tx descriptor ring */ + owner = xgene_derive_ring_owner(pdata); + ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++); + tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++, RING_CFGSIZE_16KB, ring_id); - if (!cp_ring) { + if (!tx_ring) { ret = -ENOMEM; goto err; } - cp_ring->irq = pdata->txc_irq; - snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc", ndev->name); - } - cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, - sizeof(struct 
sk_buff *), GFP_KERNEL); - if (!cp_ring->cp_skb) { - ret = -ENOMEM; - goto err; - } + size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS; + tx_ring->exp_bufs = dma_zalloc_coherent(dev, size, + &dma_exp_bufs, + GFP_KERNEL); + if (!tx_ring->exp_bufs) { + ret = -ENOMEM; + goto err; + } - size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; - cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, - size, GFP_KERNEL); - if (!cp_ring->frag_dma_addr) { - devm_kfree(dev, cp_ring->cp_skb); - ret = -ENOMEM; - goto err; - } + pdata->tx_ring[i] = tx_ring; - pdata->tx_ring->cp_ring = cp_ring; - pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + if (!pdata->cq_cnt) { + cp_ring = pdata->rx_ring[i]; + } else { + /* allocate tx completion descriptor ring */ + ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, + cpu_bufnum++); + cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++, + RING_CFGSIZE_16KB, + ring_id); + if (!cp_ring) { + ret = -ENOMEM; + goto err; + } + + cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i]; + cp_ring->index = i; + snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d", + ndev->name, i); + } + + cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots, + sizeof(struct sk_buff *), + GFP_KERNEL); + if (!cp_ring->cp_skb) { + ret = -ENOMEM; + goto err; + } - pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128; + size = sizeof(dma_addr_t) * MAX_SKB_FRAGS; + cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots, + size, GFP_KERNEL); + if (!cp_ring->frag_dma_addr) { + devm_kfree(dev, cp_ring->cp_skb); + ret = -ENOMEM; + goto err; + } + + tx_ring->cp_ring = cp_ring; + tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); + } + + pdata->ring_ops->coalesce(pdata->tx_ring[0]); + pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; return 0; @@ -1159,6 +1218,32 @@ static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata) return 0; } +static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata) +{ + struct platform_device *pdev = pdata->pdev; + struct device *dev = &pdev->dev; + int i, ret, max_irqs; + + if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) + max_irqs = 1; + else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) + max_irqs = 2; + else + max_irqs = XGENE_MAX_ENET_IRQ; + + for (i = 0; i < max_irqs; i++) { + ret = platform_get_irq(pdev, i); + if (ret <= 0) { + dev_err(dev, "Unable to get ENET IRQ\n"); + ret = ret ? : -ENXIO; + return ret; + } + pdata->irqs[i] = ret; + } + + return 0; +} + static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) { struct platform_device *pdev; @@ -1240,25 +1325,9 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) if (ret) return ret; - ret = platform_get_irq(pdev, 0); - if (ret <= 0) { - dev_err(dev, "Unable to get ENET Rx IRQ\n"); - ret = ret ? 
: -ENXIO; + ret = xgene_enet_get_irqs(pdata); + if (ret) return ret; - } - pdata->rx_irq = ret; - - if (pdata->phy_mode != PHY_INTERFACE_MODE_RGMII) { - ret = platform_get_irq(pdev, 1); - if (ret <= 0) { - pdata->cq_cnt = 0; - dev_info(dev, "Unable to get Tx completion IRQ," - "using Rx IRQ instead\n"); - } else { - pdata->cq_cnt = XGENE_MAX_TXC_RINGS; - pdata->txc_irq = ret; - } - } pdata->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pdata->clk)) { @@ -1296,7 +1365,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) struct net_device *ndev = pdata->ndev; struct xgene_enet_desc_ring *buf_pool; u16 dst_ring_num; - int ret; + int i, ret; ret = pdata->port_ops->reset(pdata); if (ret) @@ -1309,15 +1378,18 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata) } /* setup buffer pool */ - buf_pool = pdata->rx_ring->buf_pool; - xgene_enet_init_bufpool(buf_pool); - ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); - if (ret) { - xgene_enet_delete_desc_rings(pdata); - return ret; + for (i = 0; i < pdata->rxq_cnt; i++) { + buf_pool = pdata->rx_ring[i]->buf_pool; + xgene_enet_init_bufpool(buf_pool); + ret = xgene_enet_refill_bufpool(buf_pool, pdata->rx_buff_cnt); + if (ret) { + xgene_enet_delete_desc_rings(pdata); + return ret; + } } - dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring); + dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]); + buf_pool = pdata->rx_ring[0]->buf_pool; if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) { /* Initialize and Enable PreClassifier Tree */ enet_cle->max_nodes = 512; @@ -1348,17 +1420,26 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) pdata->mac_ops = &xgene_gmac_ops; pdata->port_ops = &xgene_gport_ops; pdata->rm = RM3; + pdata->rxq_cnt = 1; + pdata->txq_cnt = 1; + pdata->cq_cnt = 0; break; case PHY_INTERFACE_MODE_SGMII: pdata->mac_ops = &xgene_sgmac_ops; pdata->port_ops = &xgene_sgport_ops; pdata->rm = RM1; + pdata->rxq_cnt = 1; + pdata->txq_cnt = 1; + pdata->cq_cnt = 1; break; default: pdata->mac_ops = &xgene_xgmac_ops; pdata->port_ops = &xgene_xgport_ops; pdata->cle_ops = &xgene_cle3in_ops; pdata->rm = RM0; + pdata->rxq_cnt = XGENE_NUM_RX_RING; + pdata->txq_cnt = XGENE_NUM_TX_RING; + pdata->cq_cnt = XGENE_NUM_TXC_RING; break; } @@ -1412,12 +1493,16 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + netif_napi_add(pdata->ndev, napi, xgene_enet_napi, + NAPI_POLL_WEIGHT); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_add(pdata->ndev, napi, xgene_enet_napi, NAPI_POLL_WEIGHT); } @@ -1426,12 +1511,15 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata) static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata) { struct napi_struct *napi; + int i; - napi = &pdata->rx_ring->napi; - netif_napi_del(napi); + for (i = 0; i < pdata->rxq_cnt; i++) { + napi = &pdata->rx_ring[i]->napi; + netif_napi_del(napi); + } - if (pdata->cq_cnt) { - napi = &pdata->tx_ring->cp_ring->napi; + for (i = 0; i < pdata->cq_cnt; i++) { + napi = &pdata->tx_ring[i]->cp_ring->napi; netif_napi_del(napi); } } @@ -1445,7 +1533,8 @@ static int xgene_enet_probe(struct platform_device *pdev) const struct 
of_device_id *of_id; int ret; - ndev = alloc_etherdev(sizeof(struct xgene_enet_pdata)); + ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata), + XGENE_NUM_RX_RING, XGENE_NUM_TX_RING); if (!ndev) return -ENOMEM; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 05365c1b12db..175d18890c7a 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -49,6 +49,11 @@ #define XGENE_ENET_MSS 1448 #define XGENE_MIN_ENET_FRAME_SIZE 60 +#define XGENE_MAX_ENET_IRQ 8 +#define XGENE_NUM_RX_RING 4 +#define XGENE_NUM_TX_RING 4 +#define XGENE_NUM_TXC_RING 4 + #define START_CPU_BUFNUM_0 0 #define START_ETH_BUFNUM_0 2 #define START_BP_BUFNUM_0 0x22 @@ -73,7 +78,6 @@ #define X2_START_RING_NUM_1 256 #define IRQ_ID_SIZE 16 -#define XGENE_MAX_TXC_RINGS 1 #define PHY_POLL_LINK_ON (10 * HZ) #define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) @@ -103,6 +107,7 @@ struct xgene_enet_desc_ring { void *irq_mbox_addr; u16 dst_ring_num; u8 nbufpool; + u8 index; struct sk_buff *(*rx_skb); struct sk_buff *(*cp_skb); dma_addr_t *frag_dma_addr; @@ -144,6 +149,7 @@ struct xgene_ring_ops { void (*clear)(struct xgene_enet_desc_ring *); void (*wr_cmd)(struct xgene_enet_desc_ring *, int); u32 (*len)(struct xgene_enet_desc_ring *); + void (*coalesce)(struct xgene_enet_desc_ring *); }; struct xgene_cle_ops { @@ -159,15 +165,16 @@ struct xgene_enet_pdata { struct clk *clk; struct platform_device *pdev; enum xgene_enet_id enet_id; - struct xgene_enet_desc_ring *tx_ring; - struct xgene_enet_desc_ring *rx_ring; - u16 tx_level; - u16 txc_level; + struct xgene_enet_desc_ring *tx_ring[XGENE_NUM_TX_RING]; + struct xgene_enet_desc_ring *rx_ring[XGENE_NUM_RX_RING]; + u16 tx_level[XGENE_NUM_TX_RING]; + u16 txc_level[XGENE_NUM_TX_RING]; char *dev_name; u32 rx_buff_cnt; u32 tx_qcnt_hi; - u32 rx_irq; - u32 txc_irq; + u32 irqs[XGENE_MAX_ENET_IRQ]; + u8 rxq_cnt; + u8 txq_cnt; u8 cq_cnt; void __iomem *eth_csr_addr; void __iomem *eth_ring_if_addr; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 0b6896bb351e..2b76732add5d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c @@ -190,6 +190,17 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) return num_msgs; } +static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) +{ + u32 data = 0x7777; + + xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); + xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); + xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); +} + struct xgene_ring_ops xgene_ring2_ops = { .num_ring_config = X2_NUM_RING_CONFIG, .num_ring_id_shift = 13, @@ -197,4 +208,5 @@ struct xgene_ring_ops xgene_ring2_ops = { .clear = xgene_enet_clear_ring, .wr_cmd = xgene_enet_wr_cmd, .len = xgene_enet_ring_len, + .coalesce = xgene_enet_setup_coalescing, }; -- cgit v1.2.3 From b15edf852a2a92ccc0ad77340b9d032545953c43 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:39 +0100 Subject: rocker: remove unused rocker_port param from alloc funcs and shorten their names No need to pass rocker_port around to alloc/free rocker functions, since they now use switchdev_trans for memory management storage. 
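As background for that scheme: these helpers lean on switchdev's prepare/commit transaction model, where memory obtained during the prepare phase is parked on the transaction and handed back during the commit phase, so the commit step cannot fail on allocation. Below is a minimal, illustrative sketch of that pattern, not the driver's code (most of the real function bodies fall outside the hunks that follow). The names trans_mem_alloc()/trans_mem_free() are invented for illustration, the switchdev_trans item API (switchdev_trans_ph_prepare(), switchdev_trans_item_enqueue(), switchdev_trans_item_dequeue() from <net/switchdev.h>) is assumed to be the one of this kernel generation, and GFP flag selection is simplified to GFP_KERNEL where the real helpers choose GFP_ATOMIC or GFP_KERNEL based on a NOWAIT flag.

/*
 * Illustrative sketch only: helper names are not from the driver and
 * GFP flag handling is simplified.
 */
#include <linux/slab.h>
#include <net/switchdev.h>

static void *trans_mem_alloc(struct switchdev_trans *trans, size_t size)
{
	struct switchdev_trans_item *elem = NULL;

	if (!trans) {
		/* No transaction in progress: plain allocation. */
		elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
	} else if (switchdev_trans_ph_prepare(trans)) {
		/* Prepare phase: allocate and park the item on the
		 * transaction so the commit phase can pick it up.
		 */
		elem = kzalloc(size + sizeof(*elem), GFP_KERNEL);
		if (!elem)
			return NULL;
		switchdev_trans_item_enqueue(trans, elem, kfree, elem);
	} else {
		/* Commit phase: reuse the memory queued during prepare
		 * instead of allocating again, so commit cannot fail.
		 */
		elem = switchdev_trans_item_dequeue(trans);
	}

	/* The caller's memory starts right after the bookkeeping item. */
	return elem ? elem + 1 : NULL;
}

static void trans_mem_free(struct switchdev_trans *trans, const void *mem)
{
	struct switchdev_trans_item *elem;

	/* Frees are ignored during prepare; the memory stays queued on
	 * the transaction and is released on the commit/abort path.
	 */
	if (switchdev_trans_ph_prepare(trans))
		return;

	elem = (struct switchdev_trans_item *)mem - 1;
	kfree(elem);
}

Because the allocation bookkeeping lives entirely in the switchdev_trans item, nothing port-specific is needed, which is what makes the rocker_port parameter below removable.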
With the param removal, shorten the name of the functions since they now have nothing to do with rocker port. Signed-off-by: Jiri Pirko Acked-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.c | 87 +++++++++++++++++------------------- 1 file changed, 42 insertions(+), 45 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 166a7fc87e2f..e91efd70427d 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -341,9 +341,8 @@ static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) #define ROCKER_OP_FLAG_LEARNED BIT(2) #define ROCKER_OP_FLAG_REFRESH BIT(3) -static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t size) +static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags, + size_t size) { struct switchdev_trans_item *elem = NULL; gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ? @@ -372,21 +371,19 @@ static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port, return elem ? elem + 1 : NULL; } -static void *rocker_port_kzalloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t size) +static void *rocker_kzalloc(struct switchdev_trans *trans, int flags, + size_t size) { - return __rocker_port_mem_alloc(rocker_port, trans, flags, size); + return __rocker_mem_alloc(trans, flags, size); } -static void *rocker_port_kcalloc(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - size_t n, size_t size) +static void *rocker_kcalloc(struct switchdev_trans *trans, int flags, + size_t n, size_t size) { - return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size); + return __rocker_mem_alloc(trans, flags, n * size); } -static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem) +static void rocker_kfree(struct switchdev_trans *trans, const void *mem) { struct switchdev_trans_item *elem; @@ -426,7 +423,7 @@ static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, { struct rocker_wait *wait; - wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait)); + wait = rocker_kzalloc(trans, flags, sizeof(*wait)); if (!wait) return NULL; rocker_wait_init(wait); @@ -436,7 +433,7 @@ static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, static void rocker_wait_destroy(struct switchdev_trans *trans, struct rocker_wait *wait) { - rocker_port_kfree(trans, wait); + rocker_kfree(trans, wait); } static bool rocker_wait_event_timeout(struct rocker_wait *wait, @@ -2441,7 +2438,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, match->cookie = found->cookie; if (!switchdev_trans_ph_prepare(trans)) hash_del(&found->entry); - rocker_port_kfree(trans, found); + rocker_kfree(trans, found); found = match; found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; } else { @@ -2483,13 +2480,13 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - rocker_port_kfree(trans, match); + rocker_kfree(trans, match); if (found) { err = rocker_cmd_exec(rocker_port, trans, flags, rocker_cmd_flow_tbl_del, found, NULL, NULL); - rocker_port_kfree(trans, found); + rocker_kfree(trans, found); } return err; @@ -2512,7 +2509,7 @@ static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, { struct rocker_flow_tbl_entry *entry; - entry =
rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2534,7 +2531,7 @@ static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, { struct rocker_flow_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2561,7 +2558,7 @@ static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, { struct rocker_flow_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2601,7 +2598,7 @@ static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, bool dflt = !eth_dst || (eth_dst && eth_dst_mask); bool wild = false; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2651,7 +2648,7 @@ static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, { struct rocker_flow_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2681,7 +2678,7 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, u32 priority; struct rocker_flow_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2740,12 +2737,12 @@ static void rocker_group_tbl_entry_free(struct switchdev_trans *trans, switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - rocker_port_kfree(trans, entry->group_ids); + rocker_kfree(trans, entry->group_ids); break; default: break; } - rocker_port_kfree(trans, entry); + rocker_kfree(trans, entry); } static int rocker_group_tbl_add(struct rocker_port *rocker_port, @@ -2830,7 +2827,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port, { struct rocker_group_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2847,17 +2844,17 @@ static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, { struct rocker_group_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; entry->group_id = group_id; entry->group_count = group_count; - entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags, - group_count, sizeof(u32)); + entry->group_ids = rocker_kcalloc(trans, flags, + group_count, sizeof(u32)); if (!entry->group_ids) { - rocker_port_kfree(trans, entry); + rocker_kfree(trans, entry); return -ENOMEM; } memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); @@ -2882,7 +2879,7 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port, { struct rocker_group_tbl_entry *entry; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -2931,7 +2928,7 @@ static void _rocker_neigh_del(struct switchdev_trans *trans, return; if (--entry->ref_count == 0) { hash_del(&entry->entry); - rocker_port_kfree(trans, entry); 
+ rocker_kfree(trans, entry); } } @@ -2965,7 +2962,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, bool removing; int err = 0; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -3034,7 +3031,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, err_out: if (!adding) - rocker_port_kfree(trans, entry); + rocker_kfree(trans, entry); return err; } @@ -3082,7 +3079,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, bool resolved = true; int err = 0; - entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry)); + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); if (!entry) return -ENOMEM; @@ -3114,7 +3111,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); if (!adding) - rocker_port_kfree(trans, entry); + rocker_kfree(trans, entry); if (err) return err; @@ -3139,8 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, int err = 0; int i; - group_ids = rocker_port_kcalloc(rocker_port, trans, flags, - rocker->port_count, sizeof(u32)); + group_ids = rocker_kcalloc(trans, flags, + rocker->port_count, sizeof(u32)); if (!group_ids) return -ENOMEM; @@ -3172,7 +3169,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, "Error (%d) port VLAN l2 flood group\n", err); no_ports_in_vlan: - rocker_port_kfree(trans, group_ids); + rocker_kfree(trans, group_ids); return err; } @@ -3540,7 +3537,7 @@ static void rocker_port_fdb_learn_work(struct work_struct *work) lw->rocker_port->dev, &info.info); rtnl_unlock(); - rocker_port_kfree(lw->trans, work); + rocker_kfree(lw->trans, work); } static int rocker_port_fdb_learn(struct rocker_port *rocker_port, @@ -3574,7 +3571,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, if (!rocker_port_is_bridged(rocker_port)) return 0; - lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw)); + lw = rocker_kzalloc(trans, flags, sizeof(*lw)); if (!lw) return -ENOMEM; @@ -3587,7 +3584,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port, lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); if (switchdev_trans_ph_prepare(trans)) - rocker_port_kfree(trans, lw); + rocker_kfree(trans, lw); else schedule_work(&lw->work); @@ -3618,7 +3615,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, bool removing = (flags & ROCKER_OP_FLAG_REMOVE); unsigned long lock_flags; - fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb)); + fdb = rocker_kzalloc(trans, flags, sizeof(*fdb)); if (!fdb) return -ENOMEM; @@ -3636,7 +3633,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, if (found) { found->touched = jiffies; if (removing) { - rocker_port_kfree(trans, fdb); + rocker_kfree(trans, fdb); if (!switchdev_trans_ph_prepare(trans)) hash_del(&found->entry); } @@ -3650,7 +3647,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, /* Check if adding and already exists, or removing and can't find */ if (!found != !removing) { - rocker_port_kfree(trans, fdb); + rocker_kfree(trans, fdb); if (!found && removing) return 0; /* Refreshing existing to update aging timers */ -- cgit v1.2.3 From 0fe685f6a2831c8868e0a4389c76521d7fd42026 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:40 +0100 Subject: rocker: rename rocker.h to rocker_hw.h Since "rocker.h" file is going to be used for 
different purpose, rename the hardware-specific header to "rocker_hw.h". Signed-off-by: Jiri Pirko Acked-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.c | 2 +- drivers/net/ethernet/rocker/rocker.h | 467 -------------------------------- drivers/net/ethernet/rocker/rocker_hw.h | 467 ++++++++++++++++++++++++++++++++ 3 files changed, 468 insertions(+), 468 deletions(-) delete mode 100644 drivers/net/ethernet/rocker/rocker.h create mode 100644 drivers/net/ethernet/rocker/rocker_hw.h (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index e91efd70427d..3f1849877356 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -39,7 +39,7 @@ #include #include -#include "rocker.h" +#include "rocker_hw.h" static const char rocker_driver_name[] = "rocker"; diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h deleted file mode 100644 index 12490b2f6504..000000000000 --- a/drivers/net/ethernet/rocker/rocker.h +++ /dev/null @@ -1,467 +0,0 @@ -/* - * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver - * Copyright (c) 2014 Jiri Pirko - * Copyright (c) 2014 Scott Feldman - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#ifndef _ROCKER_H -#define _ROCKER_H - -#include - -/* Return codes */ -enum { - ROCKER_OK = 0, - ROCKER_ENOENT = 2, - ROCKER_ENXIO = 6, - ROCKER_ENOMEM = 12, - ROCKER_EEXIST = 17, - ROCKER_EINVAL = 22, - ROCKER_EMSGSIZE = 90, - ROCKER_ENOTSUP = 95, - ROCKER_ENOBUFS = 105, -}; - -#define ROCKER_FP_PORTS_MAX 62 - -#define PCI_VENDOR_ID_REDHAT 0x1b36 -#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 - -#define ROCKER_PCI_BAR0_SIZE 0x2000 - -/* MSI-X vectors */ -enum { - ROCKER_MSIX_VEC_CMD, - ROCKER_MSIX_VEC_EVENT, - ROCKER_MSIX_VEC_TEST, - ROCKER_MSIX_VEC_RESERVED0, - __ROCKER_MSIX_VEC_TX, - __ROCKER_MSIX_VEC_RX, -#define ROCKER_MSIX_VEC_TX(port) \ - (__ROCKER_MSIX_VEC_TX + ((port) * 2)) -#define ROCKER_MSIX_VEC_RX(port) \ - (__ROCKER_MSIX_VEC_RX + ((port) * 2)) -#define ROCKER_MSIX_VEC_COUNT(portcnt) \ - (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) -}; - -/* Rocker bogus registers */ -#define ROCKER_BOGUS_REG0 0x0000 -#define ROCKER_BOGUS_REG1 0x0004 -#define ROCKER_BOGUS_REG2 0x0008 -#define ROCKER_BOGUS_REG3 0x000c - -/* Rocker test registers */ -#define ROCKER_TEST_REG 0x0010 -#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ -#define ROCKER_TEST_IRQ 0x0020 -#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ -#define ROCKER_TEST_DMA_SIZE 0x0030 -#define ROCKER_TEST_DMA_CTRL 0x0034 - -/* Rocker test register ctrl */ -#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0) -#define ROCKER_TEST_DMA_CTRL_FILL BIT(1) -#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2) - -/* Rocker DMA ring register offsets */ -#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ -#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) -#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) -#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) -#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) -#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) -#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) - -/* Rocker dma ctrl register bits */ -#define ROCKER_DMA_DESC_CTRL_RESET BIT(0) - -/* Rocker DMA ring types */ -enum rocker_dma_type { - 
ROCKER_DMA_CMD, - ROCKER_DMA_EVENT, - __ROCKER_DMA_TX, - __ROCKER_DMA_RX, -#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) -#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) -}; - -/* Rocker DMA ring size limits and default sizes */ -#define ROCKER_DMA_SIZE_MIN 2ul -#define ROCKER_DMA_SIZE_MAX 65536ul -#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul -#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul -#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul -#define ROCKER_DMA_TX_DESC_SIZE 256 -#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul -#define ROCKER_DMA_RX_DESC_SIZE 256 - -/* Rocker DMA descriptor struct */ -struct rocker_desc { - u64 buf_addr; - u64 cookie; - u16 buf_size; - u16 tlv_size; - u16 resv[5]; - u16 comp_err; -}; - -#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15) - -/* Rocker DMA TLV struct */ -struct rocker_tlv { - u32 type; - u16 len; -}; - -/* TLVs */ -enum { - ROCKER_TLV_CMD_UNSPEC, - ROCKER_TLV_CMD_TYPE, /* u16 */ - ROCKER_TLV_CMD_INFO, /* nest */ - - __ROCKER_TLV_CMD_MAX, - ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_TYPE_UNSPEC, - ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, - ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, - ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, - - ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, - ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, - - __ROCKER_TLV_CMD_TYPE_MAX, - ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, - ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ - ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ - ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ - ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ - ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ - ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ - - __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - ROCKER_TLV_CMD_PORT_SETTINGS_MAX = - __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, -}; - -enum { - ROCKER_TLV_CMD_PORT_STATS_UNSPEC, - ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ - - ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ - - ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ - ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ - - __ROCKER_TLV_CMD_PORT_STATS_MAX, - ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, -}; - -enum rocker_port_mode { - ROCKER_PORT_MODE_OF_DPA, -}; - -enum { - ROCKER_TLV_EVENT_UNSPEC, - ROCKER_TLV_EVENT_TYPE, /* u16 */ - ROCKER_TLV_EVENT_INFO, /* nest */ - - __ROCKER_TLV_EVENT_MAX, - ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_TYPE_UNSPEC, - ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, - ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, - - __ROCKER_TLV_EVENT_TYPE_MAX, - ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, - ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ - ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ - - __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, - 
ROCKER_TLV_EVENT_LINK_CHANGED_MAX = - __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, -}; - -enum { - ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, - ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ - ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ - ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ - - __ROCKER_TLV_EVENT_MAC_VLAN_MAX, - ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, -}; - -enum { - ROCKER_TLV_RX_UNSPEC, - ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ - ROCKER_TLV_RX_CSUM, /* u16 */ - ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ - ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ - ROCKER_TLV_RX_FRAG_LEN, /* u16 */ - - __ROCKER_TLV_RX_MAX, - ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, -}; - -#define ROCKER_RX_FLAGS_IPV4 BIT(0) -#define ROCKER_RX_FLAGS_IPV6 BIT(1) -#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2) -#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3) -#define ROCKER_RX_FLAGS_IP_FRAG BIT(4) -#define ROCKER_RX_FLAGS_TCP BIT(5) -#define ROCKER_RX_FLAGS_UDP BIT(6) -#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) -#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) - -enum { - ROCKER_TLV_TX_UNSPEC, - ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ - ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ - ROCKER_TLV_TX_TSO_MSS, /* u16 */ - ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ - ROCKER_TLV_TX_FRAGS, /* array */ - - __ROCKER_TLV_TX_MAX, - ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, -}; - -#define ROCKER_TX_OFFLOAD_NONE 0 -#define ROCKER_TX_OFFLOAD_IP_CSUM 1 -#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 -#define ROCKER_TX_OFFLOAD_L3_CSUM 3 -#define ROCKER_TX_OFFLOAD_TSO 4 - -#define ROCKER_TX_FRAGS_MAX 16 - -enum { - ROCKER_TLV_TX_FRAG_UNSPEC, - ROCKER_TLV_TX_FRAG, /* nest */ - - __ROCKER_TLV_TX_FRAG_MAX, - ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, -}; - -enum { - ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, - ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ - ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ - - __ROCKER_TLV_TX_FRAG_ATTR_MAX, - ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, -}; - -/* cmd info nested for OF-DPA msgs */ -enum { - ROCKER_TLV_OF_DPA_UNSPEC, - ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ - ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ - ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ - ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ - ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ - ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ - ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ - ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ - ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ - ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ - ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ - ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ - ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ - ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ - ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ - ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ - ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ - ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ - ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ - ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ - ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ - ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ - ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ - 
ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ - ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ - ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ - ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ - ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ - ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ - ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ - ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ - ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ - ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ - ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ - ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ - ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ - ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ - ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ - ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ - - __ROCKER_TLV_OF_DPA_MAX, - ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, -}; - -/* OF-DPA table IDs */ - -enum rocker_of_dpa_table_id { - ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, - ROCKER_OF_DPA_TABLE_ID_VLAN = 10, - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, - ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, -}; - -/* OF-DPA flow stats */ -enum { - ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, - ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ - ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ - ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ - - __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, - ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, -}; - -/* OF-DPA group types */ -enum rocker_of_dpa_group_type { - ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, - ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, - ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, - ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, - ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, - ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, - ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, - ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, - ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, -}; - -/* OF-DPA group L2 overlay types */ -enum rocker_of_dpa_overlay_type { - ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, - ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, - ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, - ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, -}; - -/* OF-DPA group ID encoding */ -#define ROCKER_GROUP_TYPE_SHIFT 28 -#define ROCKER_GROUP_TYPE_MASK 0xf0000000 -#define ROCKER_GROUP_VLAN_SHIFT 16 -#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 -#define ROCKER_GROUP_PORT_SHIFT 0 -#define ROCKER_GROUP_PORT_MASK 0x0000ffff -#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 -#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 -#define ROCKER_GROUP_SUBTYPE_SHIFT 10 -#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 -#define ROCKER_GROUP_INDEX_SHIFT 0 -#define ROCKER_GROUP_INDEX_MASK 0x0000ffff -#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 -#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff - -#define ROCKER_GROUP_TYPE_GET(group_id) \ - (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) -#define ROCKER_GROUP_TYPE_SET(type) \ - (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) -#define ROCKER_GROUP_VLAN_GET(group_id) \ - 
(((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) -#define ROCKER_GROUP_VLAN_SET(vlan_id) \ - (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) -#define ROCKER_GROUP_PORT_GET(group_id) \ - (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) -#define ROCKER_GROUP_PORT_SET(port) \ - (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) -#define ROCKER_GROUP_INDEX_GET(group_id) \ - (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) -#define ROCKER_GROUP_INDEX_SET(index) \ - (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) -#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ - (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ - ROCKER_GROUP_INDEX_LONG_SHIFT) -#define ROCKER_GROUP_INDEX_LONG_SET(index) \ - (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ - ROCKER_GROUP_INDEX_LONG_MASK) - -#define ROCKER_GROUP_NONE 0 -#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) -#define ROCKER_GROUP_L2_REWRITE(index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ - ROCKER_GROUP_INDEX_LONG_SET(index)) -#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) -#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ - ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) -#define ROCKER_GROUP_L3_UNICAST(index) \ - (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ - ROCKER_GROUP_INDEX_LONG_SET(index)) - -/* Rocker general purpose registers */ -#define ROCKER_CONTROL 0x0300 -#define ROCKER_PORT_PHYS_COUNT 0x0304 -#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ -#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ -#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ - -/* Rocker control bits */ -#define ROCKER_CONTROL_RESET BIT(0) - -#endif diff --git a/drivers/net/ethernet/rocker/rocker_hw.h b/drivers/net/ethernet/rocker/rocker_hw.h new file mode 100644 index 000000000000..2adfe88859f2 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_hw.h @@ -0,0 +1,467 @@ +/* + * drivers/net/ethernet/rocker/rocker_hw.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _ROCKER_HW_H +#define _ROCKER_HW_H + +#include + +/* Return codes */ +enum { + ROCKER_OK = 0, + ROCKER_ENOENT = 2, + ROCKER_ENXIO = 6, + ROCKER_ENOMEM = 12, + ROCKER_EEXIST = 17, + ROCKER_EINVAL = 22, + ROCKER_EMSGSIZE = 90, + ROCKER_ENOTSUP = 95, + ROCKER_ENOBUFS = 105, +}; + +#define ROCKER_FP_PORTS_MAX 62 + +#define PCI_VENDOR_ID_REDHAT 0x1b36 +#define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 + +#define ROCKER_PCI_BAR0_SIZE 0x2000 + +/* MSI-X vectors */ +enum { + ROCKER_MSIX_VEC_CMD, + ROCKER_MSIX_VEC_EVENT, + ROCKER_MSIX_VEC_TEST, + ROCKER_MSIX_VEC_RESERVED0, + __ROCKER_MSIX_VEC_TX, + __ROCKER_MSIX_VEC_RX, +#define ROCKER_MSIX_VEC_TX(port) \ + (__ROCKER_MSIX_VEC_TX + ((port) * 2)) +#define ROCKER_MSIX_VEC_RX(port) \ + (__ROCKER_MSIX_VEC_RX + ((port) * 2)) +#define ROCKER_MSIX_VEC_COUNT(portcnt) \ + (ROCKER_MSIX_VEC_RX((portcnt - 1)) + 1) +}; + +/* Rocker bogus registers */ +#define ROCKER_BOGUS_REG0 0x0000 +#define ROCKER_BOGUS_REG1 0x0004 +#define ROCKER_BOGUS_REG2 0x0008 +#define ROCKER_BOGUS_REG3 0x000c + +/* Rocker test registers */ +#define ROCKER_TEST_REG 0x0010 +#define ROCKER_TEST_REG64 0x0018 /* 8-byte */ +#define ROCKER_TEST_IRQ 0x0020 +#define ROCKER_TEST_DMA_ADDR 0x0028 /* 8-byte */ +#define ROCKER_TEST_DMA_SIZE 0x0030 +#define ROCKER_TEST_DMA_CTRL 0x0034 + +/* Rocker test register ctrl */ +#define ROCKER_TEST_DMA_CTRL_CLEAR BIT(0) +#define ROCKER_TEST_DMA_CTRL_FILL BIT(1) +#define ROCKER_TEST_DMA_CTRL_INVERT BIT(2) + +/* Rocker DMA ring register offsets */ +#define ROCKER_DMA_DESC_ADDR(x) (0x1000 + (x) * 32) /* 8-byte */ +#define ROCKER_DMA_DESC_SIZE(x) (0x1008 + (x) * 32) +#define ROCKER_DMA_DESC_HEAD(x) (0x100c + (x) * 32) +#define ROCKER_DMA_DESC_TAIL(x) (0x1010 + (x) * 32) +#define ROCKER_DMA_DESC_CTRL(x) (0x1014 + (x) * 32) +#define ROCKER_DMA_DESC_CREDITS(x) (0x1018 + (x) * 32) +#define ROCKER_DMA_DESC_RES1(x) (0x101c + (x) * 32) + +/* Rocker dma ctrl register bits */ +#define ROCKER_DMA_DESC_CTRL_RESET BIT(0) + +/* Rocker DMA ring types */ +enum rocker_dma_type { + ROCKER_DMA_CMD, + ROCKER_DMA_EVENT, + __ROCKER_DMA_TX, + __ROCKER_DMA_RX, +#define ROCKER_DMA_TX(port) (__ROCKER_DMA_TX + (port) * 2) +#define ROCKER_DMA_RX(port) (__ROCKER_DMA_RX + (port) * 2) +}; + +/* Rocker DMA ring size limits and default sizes */ +#define ROCKER_DMA_SIZE_MIN 2ul +#define ROCKER_DMA_SIZE_MAX 65536ul +#define ROCKER_DMA_CMD_DEFAULT_SIZE 32ul +#define ROCKER_DMA_EVENT_DEFAULT_SIZE 32ul +#define ROCKER_DMA_TX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_TX_DESC_SIZE 256 +#define ROCKER_DMA_RX_DEFAULT_SIZE 64ul +#define ROCKER_DMA_RX_DESC_SIZE 256 + +/* Rocker DMA descriptor struct */ +struct rocker_desc { + u64 buf_addr; + u64 cookie; + u16 buf_size; + u16 tlv_size; + u16 resv[5]; + u16 comp_err; +}; + +#define ROCKER_DMA_DESC_COMP_ERR_GEN BIT(15) + +/* Rocker DMA TLV struct */ +struct rocker_tlv { + u32 type; + u16 len; +}; + +/* TLVs */ +enum { + ROCKER_TLV_CMD_UNSPEC, + ROCKER_TLV_CMD_TYPE, /* u16 */ + ROCKER_TLV_CMD_INFO, /* nest */ + + __ROCKER_TLV_CMD_MAX, + ROCKER_TLV_CMD_MAX = __ROCKER_TLV_CMD_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_TYPE_UNSPEC, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL, + ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS, + + 
ROCKER_TLV_CMD_TYPE_CLEAR_PORT_STATS, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS, + + __ROCKER_TLV_CMD_TYPE_MAX, + ROCKER_TLV_CMD_TYPE_MAX = __ROCKER_TLV_CMD_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_SETTINGS_UNSPEC, + ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, /* u32 */ + ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MODE, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, /* u8 */ + ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME, /* binary */ + ROCKER_TLV_CMD_PORT_SETTINGS_MTU, /* u16 */ + + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + ROCKER_TLV_CMD_PORT_SETTINGS_MAX = + __ROCKER_TLV_CMD_PORT_SETTINGS_MAX - 1, +}; + +enum { + ROCKER_TLV_CMD_PORT_STATS_UNSPEC, + ROCKER_TLV_CMD_PORT_STATS_PPORT, /* u32 */ + + ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, /* u64 */ + + ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, /* u64 */ + ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, /* u64 */ + + __ROCKER_TLV_CMD_PORT_STATS_MAX, + ROCKER_TLV_CMD_PORT_STATS_MAX = __ROCKER_TLV_CMD_PORT_STATS_MAX - 1, +}; + +enum rocker_port_mode { + ROCKER_PORT_MODE_OF_DPA, +}; + +enum { + ROCKER_TLV_EVENT_UNSPEC, + ROCKER_TLV_EVENT_TYPE, /* u16 */ + ROCKER_TLV_EVENT_INFO, /* nest */ + + __ROCKER_TLV_EVENT_MAX, + ROCKER_TLV_EVENT_MAX = __ROCKER_TLV_EVENT_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_TYPE_UNSPEC, + ROCKER_TLV_EVENT_TYPE_LINK_CHANGED, + ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN, + + __ROCKER_TLV_EVENT_TYPE_MAX, + ROCKER_TLV_EVENT_TYPE_MAX = __ROCKER_TLV_EVENT_TYPE_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_LINK_CHANGED_UNSPEC, + ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, /* u32 */ + ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP, /* u8 */ + + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX, + ROCKER_TLV_EVENT_LINK_CHANGED_MAX = + __ROCKER_TLV_EVENT_LINK_CHANGED_MAX - 1, +}; + +enum { + ROCKER_TLV_EVENT_MAC_VLAN_UNSPEC, + ROCKER_TLV_EVENT_MAC_VLAN_PPORT, /* u32 */ + ROCKER_TLV_EVENT_MAC_VLAN_MAC, /* binary */ + ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, /* __be16 */ + + __ROCKER_TLV_EVENT_MAC_VLAN_MAX, + ROCKER_TLV_EVENT_MAC_VLAN_MAX = __ROCKER_TLV_EVENT_MAC_VLAN_MAX - 1, +}; + +enum { + ROCKER_TLV_RX_UNSPEC, + ROCKER_TLV_RX_FLAGS, /* u16, see ROCKER_RX_FLAGS_ */ + ROCKER_TLV_RX_CSUM, /* u16 */ + ROCKER_TLV_RX_FRAG_ADDR, /* u64 */ + ROCKER_TLV_RX_FRAG_MAX_LEN, /* u16 */ + ROCKER_TLV_RX_FRAG_LEN, /* u16 */ + + __ROCKER_TLV_RX_MAX, + ROCKER_TLV_RX_MAX = __ROCKER_TLV_RX_MAX - 1, +}; + +#define ROCKER_RX_FLAGS_IPV4 BIT(0) +#define ROCKER_RX_FLAGS_IPV6 BIT(1) +#define ROCKER_RX_FLAGS_CSUM_CALC BIT(2) +#define ROCKER_RX_FLAGS_IPV4_CSUM_GOOD BIT(3) +#define ROCKER_RX_FLAGS_IP_FRAG BIT(4) +#define ROCKER_RX_FLAGS_TCP BIT(5) +#define ROCKER_RX_FLAGS_UDP BIT(6) +#define ROCKER_RX_FLAGS_TCP_UDP_CSUM_GOOD BIT(7) +#define ROCKER_RX_FLAGS_FWD_OFFLOAD BIT(8) + +enum { + ROCKER_TLV_TX_UNSPEC, + ROCKER_TLV_TX_OFFLOAD, /* u8, see ROCKER_TX_OFFLOAD_ */ + ROCKER_TLV_TX_L3_CSUM_OFF, /* u16 */ + ROCKER_TLV_TX_TSO_MSS, /* u16 */ + ROCKER_TLV_TX_TSO_HDR_LEN, /* u16 */ + ROCKER_TLV_TX_FRAGS, /* array */ + + __ROCKER_TLV_TX_MAX, + ROCKER_TLV_TX_MAX = __ROCKER_TLV_TX_MAX - 1, +}; + +#define ROCKER_TX_OFFLOAD_NONE 0 +#define ROCKER_TX_OFFLOAD_IP_CSUM 1 +#define ROCKER_TX_OFFLOAD_TCP_UDP_CSUM 2 +#define 
ROCKER_TX_OFFLOAD_L3_CSUM 3 +#define ROCKER_TX_OFFLOAD_TSO 4 + +#define ROCKER_TX_FRAGS_MAX 16 + +enum { + ROCKER_TLV_TX_FRAG_UNSPEC, + ROCKER_TLV_TX_FRAG, /* nest */ + + __ROCKER_TLV_TX_FRAG_MAX, + ROCKER_TLV_TX_FRAG_MAX = __ROCKER_TLV_TX_FRAG_MAX - 1, +}; + +enum { + ROCKER_TLV_TX_FRAG_ATTR_UNSPEC, + ROCKER_TLV_TX_FRAG_ATTR_ADDR, /* u64 */ + ROCKER_TLV_TX_FRAG_ATTR_LEN, /* u16 */ + + __ROCKER_TLV_TX_FRAG_ATTR_MAX, + ROCKER_TLV_TX_FRAG_ATTR_MAX = __ROCKER_TLV_TX_FRAG_ATTR_MAX - 1, +}; + +/* cmd info nested for OF-DPA msgs */ +enum { + ROCKER_TLV_OF_DPA_UNSPEC, + ROCKER_TLV_OF_DPA_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_PRIORITY, /* u32 */ + ROCKER_TLV_OF_DPA_HARDTIME, /* u32 */ + ROCKER_TLV_OF_DPA_IDLETIME, /* u32 */ + ROCKER_TLV_OF_DPA_COOKIE, /* u64 */ + ROCKER_TLV_OF_DPA_IN_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_IN_PPORT_MASK, /* u32 */ + ROCKER_TLV_OF_DPA_OUT_PPORT, /* u32 */ + ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_ID, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, /* u32 */ + ROCKER_TLV_OF_DPA_GROUP_COUNT, /* u16 */ + ROCKER_TLV_OF_DPA_GROUP_IDS, /* u32 array */ + ROCKER_TLV_OF_DPA_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_ID_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_VLAN_PCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_ID, /* __be16 */ + ROCKER_TLV_OF_DPA_NEW_VLAN_PCP, /* u8 */ + ROCKER_TLV_OF_DPA_TUNNEL_ID, /* u32 */ + ROCKER_TLV_OF_DPA_TUNNEL_LPORT, /* u32 */ + ROCKER_TLV_OF_DPA_ETHERTYPE, /* __be16 */ + ROCKER_TLV_OF_DPA_DST_MAC, /* binary */ + ROCKER_TLV_OF_DPA_DST_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC, /* binary */ + ROCKER_TLV_OF_DPA_SRC_MAC_MASK, /* binary */ + ROCKER_TLV_OF_DPA_IP_PROTO, /* u8 */ + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IP_DSCP_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_IP_DSCP, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN, /* u8 */ + ROCKER_TLV_OF_DPA_IP_ECN_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_DST_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_DST_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_DST_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6, /* binary */ + ROCKER_TLV_OF_DPA_SRC_IPV6_MASK, /* binary */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP, /* __be32 */ + ROCKER_TLV_OF_DPA_SRC_ARP_IP_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_DST_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT, /* __be16 */ + ROCKER_TLV_OF_DPA_L4_SRC_PORT_MASK, /* __be16 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_TYPE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE, /* u8 */ + ROCKER_TLV_OF_DPA_ICMP_CODE_MASK, /* u8 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL, /* __be32 */ + ROCKER_TLV_OF_DPA_IPV6_LABEL_MASK, /* __be32 */ + ROCKER_TLV_OF_DPA_QUEUE_ID_ACTION, /* u8 */ + ROCKER_TLV_OF_DPA_NEW_QUEUE_ID, /* u8 */ + ROCKER_TLV_OF_DPA_CLEAR_ACTIONS, /* u32 */ + ROCKER_TLV_OF_DPA_POP_VLAN, /* u8 */ + ROCKER_TLV_OF_DPA_TTL_CHECK, /* u8 */ + ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, /* u8 */ + + __ROCKER_TLV_OF_DPA_MAX, + ROCKER_TLV_OF_DPA_MAX = __ROCKER_TLV_OF_DPA_MAX - 1, +}; + +/* OF-DPA table IDs */ + +enum rocker_of_dpa_table_id { + ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT = 0, + ROCKER_OF_DPA_TABLE_ID_VLAN = 10, + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC = 20, + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING = 30, 
+ ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING = 40, + ROCKER_OF_DPA_TABLE_ID_BRIDGING = 50, + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY = 60, +}; + +/* OF-DPA flow stats */ +enum { + ROCKER_TLV_OF_DPA_FLOW_STAT_UNSPEC, + ROCKER_TLV_OF_DPA_FLOW_STAT_DURATION, /* u32 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_RX_PKTS, /* u64 */ + ROCKER_TLV_OF_DPA_FLOW_STAT_TX_PKTS, /* u64 */ + + __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX, + ROCKER_TLV_OF_DPA_FLOW_STAT_MAX = __ROCKER_TLV_OF_DPA_FLOW_STAT_MAX - 1, +}; + +/* OF-DPA group types */ +enum rocker_of_dpa_group_type { + ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE = 0, + ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE, + ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD, + ROCKER_OF_DPA_GROUP_TYPE_L3_INTERFACE, + ROCKER_OF_DPA_GROUP_TYPE_L3_MCAST, + ROCKER_OF_DPA_GROUP_TYPE_L3_ECMP, + ROCKER_OF_DPA_GROUP_TYPE_L2_OVERLAY, +}; + +/* OF-DPA group L2 overlay types */ +enum rocker_of_dpa_overlay_type { + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_UCAST = 0, + ROCKER_OF_DPA_OVERLAY_TYPE_FLOOD_MCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_UCAST, + ROCKER_OF_DPA_OVERLAY_TYPE_MCAST_MCAST, +}; + +/* OF-DPA group ID encoding */ +#define ROCKER_GROUP_TYPE_SHIFT 28 +#define ROCKER_GROUP_TYPE_MASK 0xf0000000 +#define ROCKER_GROUP_VLAN_SHIFT 16 +#define ROCKER_GROUP_VLAN_MASK 0x0fff0000 +#define ROCKER_GROUP_PORT_SHIFT 0 +#define ROCKER_GROUP_PORT_MASK 0x0000ffff +#define ROCKER_GROUP_TUNNEL_ID_SHIFT 12 +#define ROCKER_GROUP_TUNNEL_ID_MASK 0x0ffff000 +#define ROCKER_GROUP_SUBTYPE_SHIFT 10 +#define ROCKER_GROUP_SUBTYPE_MASK 0x00000c00 +#define ROCKER_GROUP_INDEX_SHIFT 0 +#define ROCKER_GROUP_INDEX_MASK 0x0000ffff +#define ROCKER_GROUP_INDEX_LONG_SHIFT 0 +#define ROCKER_GROUP_INDEX_LONG_MASK 0x0fffffff + +#define ROCKER_GROUP_TYPE_GET(group_id) \ + (((group_id) & ROCKER_GROUP_TYPE_MASK) >> ROCKER_GROUP_TYPE_SHIFT) +#define ROCKER_GROUP_TYPE_SET(type) \ + (((type) << ROCKER_GROUP_TYPE_SHIFT) & ROCKER_GROUP_TYPE_MASK) +#define ROCKER_GROUP_VLAN_GET(group_id) \ + (((group_id) & ROCKER_GROUP_VLAN_ID_MASK) >> ROCKER_GROUP_VLAN_ID_SHIFT) +#define ROCKER_GROUP_VLAN_SET(vlan_id) \ + (((vlan_id) << ROCKER_GROUP_VLAN_SHIFT) & ROCKER_GROUP_VLAN_MASK) +#define ROCKER_GROUP_PORT_GET(group_id) \ + (((group_id) & ROCKER_GROUP_PORT_MASK) >> ROCKER_GROUP_PORT_SHIFT) +#define ROCKER_GROUP_PORT_SET(port) \ + (((port) << ROCKER_GROUP_PORT_SHIFT) & ROCKER_GROUP_PORT_MASK) +#define ROCKER_GROUP_INDEX_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_MASK) >> ROCKER_GROUP_INDEX_SHIFT) +#define ROCKER_GROUP_INDEX_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_SHIFT) & ROCKER_GROUP_INDEX_MASK) +#define ROCKER_GROUP_INDEX_LONG_GET(group_id) \ + (((group_id) & ROCKER_GROUP_INDEX_LONG_MASK) >> \ + ROCKER_GROUP_INDEX_LONG_SHIFT) +#define ROCKER_GROUP_INDEX_LONG_SET(index) \ + (((index) << ROCKER_GROUP_INDEX_LONG_SHIFT) & \ + ROCKER_GROUP_INDEX_LONG_MASK) + +#define ROCKER_GROUP_NONE 0 +#define ROCKER_GROUP_L2_INTERFACE(vlan_id, port) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_PORT_SET(port)) +#define ROCKER_GROUP_L2_REWRITE(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) +#define ROCKER_GROUP_L2_MCAST(vlan_id, index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L2_FLOOD(vlan_id, index) \ + 
(ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD) |\ + ROCKER_GROUP_VLAN_SET(ntohs(vlan_id)) | ROCKER_GROUP_INDEX_SET(index)) +#define ROCKER_GROUP_L3_UNICAST(index) \ + (ROCKER_GROUP_TYPE_SET(ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST) |\ + ROCKER_GROUP_INDEX_LONG_SET(index)) + +/* Rocker general purpose registers */ +#define ROCKER_CONTROL 0x0300 +#define ROCKER_PORT_PHYS_COUNT 0x0304 +#define ROCKER_PORT_PHYS_LINK_STATUS 0x0310 /* 8-byte */ +#define ROCKER_PORT_PHYS_ENABLE 0x0318 /* 8-byte */ +#define ROCKER_SWITCH_ID 0x0320 /* 8-byte */ + +/* Rocker control bits */ +#define ROCKER_CONTROL_RESET BIT(0) + +#endif -- cgit v1.2.3 From 11ce2ba3d05b3ac330022da1f83b589a4f78569c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:41 +0100 Subject: rocker: rename rocker.c to rocker_main.c Since "rocker.c" is going to be split into multiple files, start with renaming original "rocker.c" file to "rocker_main.c". Multiple code parts are going to be cut from "rocker_main.c" later on. Fix couple of checkpatch issues on the way. Signed-off-by: Jiri Pirko Acked-by: Scott Feldman Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/Makefile | 1 + drivers/net/ethernet/rocker/rocker.c | 5492 ---------------------------- drivers/net/ethernet/rocker/rocker_main.c | 5493 +++++++++++++++++++++++++++++ 3 files changed, 5494 insertions(+), 5492 deletions(-) delete mode 100644 drivers/net/ethernet/rocker/rocker.c create mode 100644 drivers/net/ethernet/rocker/rocker_main.c (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile index f85fb12f36f1..2bed42385076 100644 --- a/drivers/net/ethernet/rocker/Makefile +++ b/drivers/net/ethernet/rocker/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_ROCKER) += rocker.o +rocker-y := rocker_main.o diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c deleted file mode 100644 index 3f1849877356..000000000000 --- a/drivers/net/ethernet/rocker/rocker.c +++ /dev/null @@ -1,5492 +0,0 @@ -/* - * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver - * Copyright (c) 2014 Jiri Pirko - * Copyright (c) 2014 Scott Feldman - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "rocker_hw.h" - -static const char rocker_driver_name[] = "rocker"; - -static const struct pci_device_id rocker_pci_id_table[] = { - {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, - {0, } -}; - -struct rocker_flow_tbl_key { - u32 priority; - enum rocker_of_dpa_table_id tbl_id; - union { - struct { - u32 in_pport; - u32 in_pport_mask; - enum rocker_of_dpa_table_id goto_tbl; - } ig_port; - struct { - u32 in_pport; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool untagged; - __be16 new_vlan_id; - } vlan; - struct { - u32 in_pport; - u32 in_pport_mask; - __be16 eth_type; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool copy_to_cpu; - } term_mac; - struct { - __be16 eth_type; - __be32 dst4; - __be32 dst4_mask; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - } ucast_routing; - struct { - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - int has_eth_dst; - int has_eth_dst_mask; - __be16 vlan_id; - u32 tunnel_id; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - bool copy_to_cpu; - } bridge; - struct { - u32 in_pport; - u32 in_pport_mask; - u8 eth_src[ETH_ALEN]; - u8 eth_src_mask[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 eth_type; - __be16 vlan_id; - __be16 vlan_id_mask; - u8 ip_proto; - u8 ip_proto_mask; - u8 ip_tos; - u8 ip_tos_mask; - u32 group_id; - } acl; - }; -}; - -struct rocker_flow_tbl_entry { - struct hlist_node entry; - u32 cmd; - u64 cookie; - struct rocker_flow_tbl_key key; - size_t key_len; - u32 key_crc32; /* key */ -}; - -struct rocker_group_tbl_entry { - struct hlist_node entry; - u32 cmd; - u32 group_id; /* key */ - u16 group_count; - u32 *group_ids; - union { - struct { - u8 pop_vlan; - } l2_interface; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - u32 group_id; - } l2_rewrite; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - bool ttl_check; - u32 group_id; - } l3_unicast; - }; -}; - -struct rocker_fdb_tbl_entry { - struct hlist_node entry; - u32 key_crc32; /* key */ - bool learned; - unsigned long touched; - struct rocker_fdb_tbl_key { - struct rocker_port *rocker_port; - u8 addr[ETH_ALEN]; - __be16 vlan_id; - } key; -}; - -struct rocker_internal_vlan_tbl_entry { - struct hlist_node entry; - int ifindex; /* key */ - u32 ref_count; - __be16 vlan_id; -}; - -struct rocker_neigh_tbl_entry { - struct hlist_node entry; - __be32 ip_addr; /* key */ - struct net_device *dev; - u32 ref_count; - u32 index; - u8 eth_dst[ETH_ALEN]; - bool ttl_check; -}; - -struct rocker_desc_info { - char *data; /* mapped */ - size_t data_size; - size_t tlv_size; - struct rocker_desc *desc; - dma_addr_t mapaddr; -}; - -struct rocker_dma_ring_info { - size_t size; - u32 head; - u32 tail; - struct rocker_desc *desc; /* mapped */ - dma_addr_t mapaddr; - struct rocker_desc_info *desc_info; - unsigned int type; -}; - -struct rocker; - -enum { - ROCKER_CTRL_LINK_LOCAL_MCAST, - ROCKER_CTRL_LOCAL_ARP, - ROCKER_CTRL_IPV4_MCAST, - ROCKER_CTRL_IPV6_MCAST, - ROCKER_CTRL_DFLT_BRIDGING, - ROCKER_CTRL_DFLT_OVS, - ROCKER_CTRL_MAX, -}; - -#define 
ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 -#define ROCKER_N_INTERNAL_VLANS 255 -#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) -#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) - -struct rocker_port { - struct net_device *dev; - struct net_device *bridge_dev; - struct rocker *rocker; - unsigned int port_number; - u32 pport; - __be16 internal_vlan_id; - int stp_state; - u32 brport_flags; - unsigned long ageing_time; - bool ctrls[ROCKER_CTRL_MAX]; - unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; - struct napi_struct napi_tx; - struct napi_struct napi_rx; - struct rocker_dma_ring_info tx_ring; - struct rocker_dma_ring_info rx_ring; -}; - -struct rocker { - struct pci_dev *pdev; - u8 __iomem *hw_addr; - struct msix_entry *msix_entries; - unsigned int port_count; - struct rocker_port **ports; - struct { - u64 id; - } hw; - spinlock_t cmd_ring_lock; /* for cmd ring accesses */ - struct rocker_dma_ring_info cmd_ring; - struct rocker_dma_ring_info event_ring; - DECLARE_HASHTABLE(flow_tbl, 16); - spinlock_t flow_tbl_lock; /* for flow tbl accesses */ - u64 flow_tbl_next_cookie; - DECLARE_HASHTABLE(group_tbl, 16); - spinlock_t group_tbl_lock; /* for group tbl accesses */ - struct timer_list fdb_cleanup_timer; - DECLARE_HASHTABLE(fdb_tbl, 16); - spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ - unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; - DECLARE_HASHTABLE(internal_vlan_tbl, 8); - spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ - DECLARE_HASHTABLE(neigh_tbl, 16); - spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ - u32 neigh_tbl_next_index; -}; - -static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; -static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; -static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; - -/* Rocker priority levels for flow table entries. Higher - * priority match takes precedence over lower priority match. 
- */ - -enum { - ROCKER_PRIORITY_UNKNOWN = 0, - ROCKER_PRIORITY_IG_PORT = 1, - ROCKER_PRIORITY_VLAN = 1, - ROCKER_PRIORITY_TERM_MAC_UCAST = 0, - ROCKER_PRIORITY_TERM_MAC_MCAST = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_VLAN = 3, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_TENANT = 3, - ROCKER_PRIORITY_ACL_CTRL = 3, - ROCKER_PRIORITY_ACL_NORMAL = 2, - ROCKER_PRIORITY_ACL_DFLT = 1, -}; - -static bool rocker_vlan_id_is_internal(__be16 vlan_id) -{ - u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; - u16 end = 0xffe; - u16 _vlan_id = ntohs(vlan_id); - - return (_vlan_id >= start && _vlan_id <= end); -} - -static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port, - u16 vid, bool *pop_vlan) -{ - __be16 vlan_id; - - if (pop_vlan) - *pop_vlan = false; - vlan_id = htons(vid); - if (!vlan_id) { - vlan_id = rocker_port->internal_vlan_id; - if (pop_vlan) - *pop_vlan = true; - } - - return vlan_id; -} - -static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, - __be16 vlan_id) -{ - if (rocker_vlan_id_is_internal(vlan_id)) - return 0; - - return ntohs(vlan_id); -} - -static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_bridge_master(rocker_port->bridge_dev); -} - -static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_ovs_master(rocker_port->bridge_dev); -} - -#define ROCKER_OP_FLAG_REMOVE BIT(0) -#define ROCKER_OP_FLAG_NOWAIT BIT(1) -#define ROCKER_OP_FLAG_LEARNED BIT(2) -#define ROCKER_OP_FLAG_REFRESH BIT(3) - -static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags, - size_t size) -{ - struct switchdev_trans_item *elem = NULL; - gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ? - GFP_ATOMIC : GFP_KERNEL; - - /* If in transaction prepare phase, allocate the memory - * and enqueue it on a transaction. If in transaction - * commit phase, dequeue the memory from the transaction - * rather than re-allocating the memory. The idea is the - * driver code paths for prepare and commit are identical - * so the memory allocated in the prepare phase is the - * memory used in the commit phase. - */ - - if (!trans) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - } else if (switchdev_trans_ph_prepare(trans)) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - if (!elem) - return NULL; - switchdev_trans_item_enqueue(trans, elem, kfree, elem); - } else { - elem = switchdev_trans_item_dequeue(trans); - } - - return elem ? elem + 1 : NULL; -} - -static void *rocker_kzalloc(struct switchdev_trans *trans, int flags, - size_t size) -{ - return __rocker_mem_alloc(trans, flags, size); -} - -static void *rocker_kcalloc(struct switchdev_trans *trans, int flags, - size_t n, size_t size) -{ - return __rocker_mem_alloc(trans, flags, n * size); -} - -static void rocker_kfree(struct switchdev_trans *trans, const void *mem) -{ - struct switchdev_trans_item *elem; - - /* Frees are ignored if in transaction prepare phase. The - * memory remains on the per-port list until freed in the - * commit phase. 
- */ - - if (switchdev_trans_ph_prepare(trans)) - return; - - elem = (struct switchdev_trans_item *) mem - 1; - kfree(elem); -} - -struct rocker_wait { - wait_queue_head_t wait; - bool done; - bool nowait; -}; - -static void rocker_wait_reset(struct rocker_wait *wait) -{ - wait->done = false; - wait->nowait = false; -} - -static void rocker_wait_init(struct rocker_wait *wait) -{ - init_waitqueue_head(&wait->wait); - rocker_wait_reset(wait); -} - -static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags) -{ - struct rocker_wait *wait; - - wait = rocker_kzalloc(trans, flags, sizeof(*wait)); - if (!wait) - return NULL; - rocker_wait_init(wait); - return wait; -} - -static void rocker_wait_destroy(struct switchdev_trans *trans, - struct rocker_wait *wait) -{ - rocker_kfree(trans, wait); -} - -static bool rocker_wait_event_timeout(struct rocker_wait *wait, - unsigned long timeout) -{ - wait_event_timeout(wait->wait, wait->done, HZ / 10); - if (!wait->done) - return false; - return true; -} - -static void rocker_wait_wake_up(struct rocker_wait *wait) -{ - wait->done = true; - wake_up(&wait->wait); -} - -static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) -{ - return rocker->msix_entries[vector].vector; -} - -static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) -{ - return rocker_msix_vector(rocker_port->rocker, - ROCKER_MSIX_VEC_TX(rocker_port->port_number)); -} - -static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) -{ - return rocker_msix_vector(rocker_port->rocker, - ROCKER_MSIX_VEC_RX(rocker_port->port_number)); -} - -#define rocker_write32(rocker, reg, val) \ - writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_read32(rocker, reg) \ - readl((rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_write64(rocker, reg, val) \ - writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) -#define rocker_read64(rocker, reg) \ - readq((rocker)->hw_addr + (ROCKER_ ## reg)) - -/***************************** - * HW basic testing functions - *****************************/ - -static int rocker_reg_test(const struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - u64 test_reg; - u64 rnd; - - rnd = prandom_u32(); - rnd >>= 1; - rocker_write32(rocker, TEST_REG, rnd); - test_reg = rocker_read32(rocker, TEST_REG); - if (test_reg != rnd * 2) { - dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", - test_reg, rnd * 2); - return -EIO; - } - - rnd = prandom_u32(); - rnd <<= 31; - rnd |= prandom_u32(); - rocker_write64(rocker, TEST_REG64, rnd); - test_reg = rocker_read64(rocker, TEST_REG64); - if (test_reg != rnd * 2) { - dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", - test_reg, rnd * 2); - return -EIO; - } - - return 0; -} - -static int rocker_dma_test_one(const struct rocker *rocker, - struct rocker_wait *wait, u32 test_type, - dma_addr_t dma_handle, const unsigned char *buf, - const unsigned char *expect, size_t size) -{ - const struct pci_dev *pdev = rocker->pdev; - int i; - - rocker_wait_reset(wait); - rocker_write32(rocker, TEST_DMA_CTRL, test_type); - - if (!rocker_wait_event_timeout(wait, HZ / 10)) { - dev_err(&pdev->dev, "no interrupt received within a timeout\n"); - return -EIO; - } - - for (i = 0; i < size; i++) { - if (buf[i] != expect[i]) { - dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", - buf[i], i, expect[i]); - return -EIO; - } - } - return 
0; -} - -#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) -#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 - -static int rocker_dma_test_offset(const struct rocker *rocker, - struct rocker_wait *wait, int offset) -{ - struct pci_dev *pdev = rocker->pdev; - unsigned char *alloc; - unsigned char *buf; - unsigned char *expect; - dma_addr_t dma_handle; - int i; - int err; - - alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, - GFP_KERNEL | GFP_DMA); - if (!alloc) - return -ENOMEM; - buf = alloc + offset; - expect = buf + ROCKER_TEST_DMA_BUF_SIZE; - - dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); - if (pci_dma_mapping_error(pdev, dma_handle)) { - err = -EIO; - goto free_alloc; - } - - rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); - rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); - - memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - - memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - - prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); - for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) - expect[i] = ~buf[i]; - err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, - dma_handle, buf, expect, - ROCKER_TEST_DMA_BUF_SIZE); - if (err) - goto unmap; - -unmap: - pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, - PCI_DMA_BIDIRECTIONAL); -free_alloc: - kfree(alloc); - - return err; -} - -static int rocker_dma_test(const struct rocker *rocker, - struct rocker_wait *wait) -{ - int i; - int err; - - for (i = 0; i < 8; i++) { - err = rocker_dma_test_offset(rocker, wait, i); - if (err) - return err; - } - return 0; -} - -static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) -{ - struct rocker_wait *wait = dev_id; - - rocker_wait_wake_up(wait); - - return IRQ_HANDLED; -} - -static int rocker_basic_hw_test(const struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - struct rocker_wait wait; - int err; - - err = rocker_reg_test(rocker); - if (err) { - dev_err(&pdev->dev, "reg test failed\n"); - return err; - } - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), - rocker_test_irq_handler, 0, - rocker_driver_name, &wait); - if (err) { - dev_err(&pdev->dev, "cannot assign test irq\n"); - return err; - } - - rocker_wait_init(&wait); - rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); - - if (!rocker_wait_event_timeout(&wait, HZ / 10)) { - dev_err(&pdev->dev, "no interrupt received within a timeout\n"); - err = -EIO; - goto free_irq; - } - - err = rocker_dma_test(rocker, &wait); - if (err) - dev_err(&pdev->dev, "dma test failed\n"); - -free_irq: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); - return err; -} - -/****** - * TLV - ******/ - -#define ROCKER_TLV_ALIGNTO 8U -#define ROCKER_TLV_ALIGN(len) \ - (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) -#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) - -/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> - * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ - * | Header | Pad | Payload | Pad | - * | (struct rocker_tlv) | ing | | ing | - * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ 
- * <--------------------------- tlv->len --------------------------> - */ - -static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, - int *remaining) -{ - int totlen = ROCKER_TLV_ALIGN(tlv->len); - - *remaining -= totlen; - return (struct rocker_tlv *) ((char *) tlv + totlen); -} - -static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) -{ - return remaining >= (int) ROCKER_TLV_HDRLEN && - tlv->len >= ROCKER_TLV_HDRLEN && - tlv->len <= remaining; -} - -#define rocker_tlv_for_each(pos, head, len, rem) \ - for (pos = head, rem = len; \ - rocker_tlv_ok(pos, rem); \ - pos = rocker_tlv_next(pos, &(rem))) - -#define rocker_tlv_for_each_nested(pos, tlv, rem) \ - rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ - rocker_tlv_len(tlv), rem) - -static int rocker_tlv_attr_size(int payload) -{ - return ROCKER_TLV_HDRLEN + payload; -} - -static int rocker_tlv_total_size(int payload) -{ - return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); -} - -static int rocker_tlv_padlen(int payload) -{ - return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); -} - -static int rocker_tlv_type(const struct rocker_tlv *tlv) -{ - return tlv->type; -} - -static void *rocker_tlv_data(const struct rocker_tlv *tlv) -{ - return (char *) tlv + ROCKER_TLV_HDRLEN; -} - -static int rocker_tlv_len(const struct rocker_tlv *tlv) -{ - return tlv->len - ROCKER_TLV_HDRLEN; -} - -static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) -{ - return *(u8 *) rocker_tlv_data(tlv); -} - -static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) -{ - return *(u16 *) rocker_tlv_data(tlv); -} - -static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) -{ - return *(__be16 *) rocker_tlv_data(tlv); -} - -static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) -{ - return *(u32 *) rocker_tlv_data(tlv); -} - -static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) -{ - return *(u64 *) rocker_tlv_data(tlv); -} - -static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, - const char *buf, int buf_len) -{ - const struct rocker_tlv *tlv; - const struct rocker_tlv *head = (const struct rocker_tlv *) buf; - int rem; - - memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); - - rocker_tlv_for_each(tlv, head, buf_len, rem) { - u32 type = rocker_tlv_type(tlv); - - if (type > 0 && type <= maxtype) - tb[type] = tlv; - } -} - -static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype, - const struct rocker_tlv *tlv) -{ - rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), - rocker_tlv_len(tlv)); -} - -static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, - const struct rocker_desc_info *desc_info) -{ - rocker_tlv_parse(tb, maxtype, desc_info->data, - desc_info->desc->tlv_size); -} - -static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) -{ - return (struct rocker_tlv *) ((char *) desc_info->data + - desc_info->tlv_size); -} - -static int rocker_tlv_put(struct rocker_desc_info *desc_info, - int attrtype, int attrlen, const void *data) -{ - int tail_room = desc_info->data_size - desc_info->tlv_size; - int total_size = rocker_tlv_total_size(attrlen); - struct rocker_tlv *tlv; - - if (unlikely(tail_room < total_size)) - return -EMSGSIZE; - - tlv = rocker_tlv_start(desc_info); - desc_info->tlv_size += total_size; - tlv->type = attrtype; - tlv->len = rocker_tlv_attr_size(attrlen); - memcpy(rocker_tlv_data(tlv), data, attrlen); - memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); - return 0; -} - 
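The diagram and helpers above fix how each attribute is laid out: an 8-byte-aligned header, the payload, and a zeroed tail pad that rocker_tlv_put() writes but does not count in tlv->len (rocker_tlv_next() re-aligns tlv->len to find the following attribute). A minimal standalone sketch of that size arithmetic, assuming the type/len header declared in rocker_hw.h occupies 6 bytes so ROCKER_TLV_HDRLEN rounds up to 8:

#include <stdio.h>

#define ROCKER_TLV_ALIGNTO 8U
#define ROCKER_TLV_ALIGN(len) \
	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
/* sizeof(struct rocker_tlv) is assumed to be 6 bytes (type + len) here */
#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(6)

int main(void)
{
	unsigned int payload = 4;				/* e.g. a u32 attribute */
	unsigned int attr_size = ROCKER_TLV_HDRLEN + payload;	/* rocker_tlv_attr_size(): value stored in tlv->len */
	unsigned int total_size = ROCKER_TLV_ALIGN(attr_size);	/* rocker_tlv_total_size(): space consumed in the buffer */
	unsigned int padlen = total_size - attr_size;		/* rocker_tlv_padlen(): zeroed tail */

	/* prints "hdr=8 attr=12 total=16 pad=4" */
	printf("hdr=%u attr=%u total=%u pad=%u\n",
	       (unsigned int)ROCKER_TLV_HDRLEN, attr_size, total_size, padlen);
	return 0;
}

For a u32 attribute this gives attr_size = 12 (the value written to tlv->len), total_size = 16, and 4 pad bytes, which is exactly what rocker_tlv_put() lays down in the descriptor buffer before the typed wrappers below build on it.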
-static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, - int attrtype, u8 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); -} - -static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, - int attrtype, u16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); -} - -static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, - int attrtype, __be16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); -} - -static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, - int attrtype, u32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); -} - -static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, - int attrtype, __be32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); -} - -static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, - int attrtype, u64 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); -} - -static struct rocker_tlv * -rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) -{ - struct rocker_tlv *start = rocker_tlv_start(desc_info); - - if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) - return NULL; - - return start; -} - -static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, - struct rocker_tlv *start) -{ - start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; -} - -static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, - const struct rocker_tlv *start) -{ - desc_info->tlv_size = (const char *) start - desc_info->data; -} - -/****************************************** - * DMA rings and descriptors manipulations - ******************************************/ - -static u32 __pos_inc(u32 pos, size_t limit) -{ - return ++pos == limit ? 0 : pos; -} - -static int rocker_desc_err(const struct rocker_desc_info *desc_info) -{ - int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; - - switch (err) { - case ROCKER_OK: - return 0; - case -ROCKER_ENOENT: - return -ENOENT; - case -ROCKER_ENXIO: - return -ENXIO; - case -ROCKER_ENOMEM: - return -ENOMEM; - case -ROCKER_EEXIST: - return -EEXIST; - case -ROCKER_EINVAL: - return -EINVAL; - case -ROCKER_EMSGSIZE: - return -EMSGSIZE; - case -ROCKER_ENOTSUP: - return -EOPNOTSUPP; - case -ROCKER_ENOBUFS: - return -ENOBUFS; - } - - return -EINVAL; -} - -static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) -{ - desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; -} - -static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) -{ - u32 comp_err = desc_info->desc->comp_err; - - return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
true : false; -} - -static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) -{ - return (void *)(uintptr_t)desc_info->desc->cookie; -} - -static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, - void *ptr) -{ - desc_info->desc->cookie = (uintptr_t) ptr; -} - -static struct rocker_desc_info * -rocker_desc_head_get(const struct rocker_dma_ring_info *info) -{ - static struct rocker_desc_info *desc_info; - u32 head = __pos_inc(info->head, info->size); - - desc_info = &info->desc_info[info->head]; - if (head == info->tail) - return NULL; /* ring full */ - desc_info->tlv_size = 0; - return desc_info; -} - -static void rocker_desc_commit(const struct rocker_desc_info *desc_info) -{ - desc_info->desc->buf_size = desc_info->data_size; - desc_info->desc->tlv_size = desc_info->tlv_size; -} - -static void rocker_desc_head_set(const struct rocker *rocker, - struct rocker_dma_ring_info *info, - const struct rocker_desc_info *desc_info) -{ - u32 head = __pos_inc(info->head, info->size); - - BUG_ON(head == info->tail); - rocker_desc_commit(desc_info); - info->head = head; - rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); -} - -static struct rocker_desc_info * -rocker_desc_tail_get(struct rocker_dma_ring_info *info) -{ - static struct rocker_desc_info *desc_info; - - if (info->tail == info->head) - return NULL; /* nothing to be done between head and tail */ - desc_info = &info->desc_info[info->tail]; - if (!rocker_desc_gen(desc_info)) - return NULL; /* gen bit not set, desc is not ready yet */ - info->tail = __pos_inc(info->tail, info->size); - desc_info->tlv_size = desc_info->desc->tlv_size; - return desc_info; -} - -static void rocker_dma_ring_credits_set(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - u32 credits) -{ - if (credits) - rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); -} - -static unsigned long rocker_dma_ring_size_fix(size_t size) -{ - return max(ROCKER_DMA_SIZE_MIN, - min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); -} - -static int rocker_dma_ring_create(const struct rocker *rocker, - unsigned int type, - size_t size, - struct rocker_dma_ring_info *info) -{ - int i; - - BUG_ON(size != rocker_dma_ring_size_fix(size)); - info->size = size; - info->type = type; - info->head = 0; - info->tail = 0; - info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), - GFP_KERNEL); - if (!info->desc_info) - return -ENOMEM; - - info->desc = pci_alloc_consistent(rocker->pdev, - info->size * sizeof(*info->desc), - &info->mapaddr); - if (!info->desc) { - kfree(info->desc_info); - return -ENOMEM; - } - - for (i = 0; i < info->size; i++) - info->desc_info[i].desc = &info->desc[i]; - - rocker_write32(rocker, DMA_DESC_CTRL(info->type), - ROCKER_DMA_DESC_CTRL_RESET); - rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); - rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); - - return 0; -} - -static void rocker_dma_ring_destroy(const struct rocker *rocker, - const struct rocker_dma_ring_info *info) -{ - rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); - - pci_free_consistent(rocker->pdev, - info->size * sizeof(struct rocker_desc), - info->desc, info->mapaddr); - kfree(info->desc_info); -} - -static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, - struct rocker_dma_ring_info *info) -{ - int i; - - BUG_ON(info->head || info->tail); - - /* When ring is consumer, we need to advance head for each desc. 
- * That tells hw that the desc is ready to be used by it. - */ - for (i = 0; i < info->size - 1; i++) - rocker_desc_head_set(rocker, info, &info->desc_info[i]); - rocker_desc_commit(&info->desc_info[i]); -} - -static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - int direction, size_t buf_size) -{ - struct pci_dev *pdev = rocker->pdev; - int i; - int err; - - for (i = 0; i < info->size; i++) { - struct rocker_desc_info *desc_info = &info->desc_info[i]; - struct rocker_desc *desc = &info->desc[i]; - dma_addr_t dma_handle; - char *buf; - - buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); - if (!buf) { - err = -ENOMEM; - goto rollback; - } - - dma_handle = pci_map_single(pdev, buf, buf_size, direction); - if (pci_dma_mapping_error(pdev, dma_handle)) { - kfree(buf); - err = -EIO; - goto rollback; - } - - desc_info->data = buf; - desc_info->data_size = buf_size; - dma_unmap_addr_set(desc_info, mapaddr, dma_handle); - - desc->buf_addr = dma_handle; - desc->buf_size = buf_size; - } - return 0; - -rollback: - for (i--; i >= 0; i--) { - const struct rocker_desc_info *desc_info = &info->desc_info[i]; - - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), - desc_info->data_size, direction); - kfree(desc_info->data); - } - return err; -} - -static void rocker_dma_ring_bufs_free(const struct rocker *rocker, - const struct rocker_dma_ring_info *info, - int direction) -{ - struct pci_dev *pdev = rocker->pdev; - int i; - - for (i = 0; i < info->size; i++) { - const struct rocker_desc_info *desc_info = &info->desc_info[i]; - struct rocker_desc *desc = &info->desc[i]; - - desc->buf_addr = 0; - desc->buf_size = 0; - pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), - desc_info->data_size, direction); - kfree(desc_info->data); - } -} - -static int rocker_dma_rings_init(struct rocker *rocker) -{ - const struct pci_dev *pdev = rocker->pdev; - int err; - - err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, - ROCKER_DMA_CMD_DEFAULT_SIZE, - &rocker->cmd_ring); - if (err) { - dev_err(&pdev->dev, "failed to create command dma ring\n"); - return err; - } - - spin_lock_init(&rocker->cmd_ring_lock); - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); - if (err) { - dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); - goto err_dma_cmd_ring_bufs_alloc; - } - - err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, - ROCKER_DMA_EVENT_DEFAULT_SIZE, - &rocker->event_ring); - if (err) { - dev_err(&pdev->dev, "failed to create event dma ring\n"); - goto err_dma_event_ring_create; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, - PCI_DMA_FROMDEVICE, PAGE_SIZE); - if (err) { - dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); - goto err_dma_event_ring_bufs_alloc; - } - rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); - return 0; - -err_dma_event_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker->event_ring); -err_dma_event_ring_create: - rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL); -err_dma_cmd_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); - return err; -} - -static void rocker_dma_rings_fini(struct rocker *rocker) -{ - rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, &rocker->event_ring); - rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, 
&rocker->cmd_ring); -} - -static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - struct sk_buff *skb, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - - dma_handle = pci_map_single(pdev, skb->data, buf_len, - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(pdev, dma_handle)) - return -EIO; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) - goto tlv_put_failure; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) - goto tlv_put_failure; - return 0; - -tlv_put_failure: - pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); - desc_info->tlv_size = 0; - return -EMSGSIZE; -} - -static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) -{ - return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; -} - -static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info) -{ - struct net_device *dev = rocker_port->dev; - struct sk_buff *skb; - size_t buf_len = rocker_port_rx_buf_len(rocker_port); - int err; - - /* Ensure that hw will see tlv_size zero in case of an error. - * That tells hw to use another descriptor. - */ - rocker_desc_cookie_ptr_set(desc_info, NULL); - desc_info->tlv_size = 0; - - skb = netdev_alloc_skb_ip_align(dev, buf_len); - if (!skb) - return -ENOMEM; - err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); - if (err) { - dev_kfree_skb_any(skb); - return err; - } - rocker_desc_cookie_ptr_set(desc_info, skb); - return 0; -} - -static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, - const struct rocker_tlv **attrs) -{ - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - size_t len; - - if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || - !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) - return; - dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); - len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); - pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); -} - -static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, - const struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; - struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); - - if (!skb) - return; - rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); - rocker_dma_rx_ring_skb_unmap(rocker, attrs); - dev_kfree_skb_any(skb); -} - -static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) -{ - const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; - const struct rocker *rocker = rocker_port->rocker; - int i; - int err; - - for (i = 0; i < rx_ring->size; i++) { - err = rocker_dma_rx_ring_skb_alloc(rocker_port, - &rx_ring->desc_info[i]); - if (err) - goto rollback; - } - return 0; - -rollback: - for (i--; i >= 0; i--) - rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); - return err; -} - -static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) -{ - const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; - const struct rocker *rocker = rocker_port->rocker; - int i; - - for (i = 0; i < rx_ring->size; i++) - rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); -} - -static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) -{ - struct rocker *rocker = rocker_port->rocker; - int err; - - err = rocker_dma_ring_create(rocker, - 
ROCKER_DMA_TX(rocker_port->port_number), - ROCKER_DMA_TX_DEFAULT_SIZE, - &rocker_port->tx_ring); - if (err) { - netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); - return err; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE, - ROCKER_DMA_TX_DESC_SIZE); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); - goto err_dma_tx_ring_bufs_alloc; - } - - err = rocker_dma_ring_create(rocker, - ROCKER_DMA_RX(rocker_port->port_number), - ROCKER_DMA_RX_DEFAULT_SIZE, - &rocker_port->rx_ring); - if (err) { - netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); - goto err_dma_rx_ring_create; - } - - err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL, - ROCKER_DMA_RX_DESC_SIZE); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); - goto err_dma_rx_ring_bufs_alloc; - } - - err = rocker_dma_rx_ring_skbs_alloc(rocker_port); - if (err) { - netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); - goto err_dma_rx_ring_skbs_alloc; - } - rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); - - return 0; - -err_dma_rx_ring_skbs_alloc: - rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL); -err_dma_rx_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); -err_dma_rx_ring_create: - rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE); -err_dma_tx_ring_bufs_alloc: - rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); - return err; -} - -static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) -{ - struct rocker *rocker = rocker_port->rocker; - - rocker_dma_rx_ring_skbs_free(rocker_port); - rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, - PCI_DMA_BIDIRECTIONAL); - rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); - rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, - PCI_DMA_TODEVICE); - rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); -} - -static void rocker_port_set_enable(const struct rocker_port *rocker_port, - bool enable) -{ - u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); - - if (enable) - val |= 1ULL << rocker_port->pport; - else - val &= ~(1ULL << rocker_port->pport); - rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); -} - -/******************************** - * Interrupt handler and helpers - ********************************/ - -static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) -{ - struct rocker *rocker = dev_id; - const struct rocker_desc_info *desc_info; - struct rocker_wait *wait; - u32 credits = 0; - - spin_lock(&rocker->cmd_ring_lock); - while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { - wait = rocker_desc_cookie_ptr_get(desc_info); - if (wait->nowait) { - rocker_desc_gen_clear(desc_info); - rocker_wait_destroy(NULL, wait); - } else { - rocker_wait_wake_up(wait); - } - credits++; - } - spin_unlock(&rocker->cmd_ring_lock); - rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); - - return IRQ_HANDLED; -} - -static void rocker_port_link_up(const struct rocker_port *rocker_port) -{ - netif_carrier_on(rocker_port->dev); - netdev_info(rocker_port->dev, "Link is up\n"); -} - -static void rocker_port_link_down(const struct rocker_port *rocker_port) -{ - netif_carrier_off(rocker_port->dev); - netdev_info(rocker_port->dev, "Link is down\n"); -} - -static int rocker_event_link_change(const struct rocker *rocker, - const struct 
rocker_tlv *info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; - unsigned int port_number; - bool link_up; - struct rocker_port *rocker_port; - - rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || - !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) - return -EIO; - port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; - link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); - - if (port_number >= rocker->port_count) - return -EINVAL; - - rocker_port = rocker->ports[port_number]; - if (netif_carrier_ok(rocker_port->dev) != link_up) { - if (link_up) - rocker_port_link_up(rocker_port); - else - rocker_port_link_down(rocker_port); - } - - return 0; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags); - -static int rocker_event_mac_vlan_seen(const struct rocker *rocker, - const struct rocker_tlv *info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; - unsigned int port_number; - struct rocker_port *rocker_port; - const unsigned char *addr; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; - __be16 vlan_id; - - rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); - if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || - !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || - !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) - return -EIO; - port_number = - rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; - addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); - vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); - - if (port_number >= rocker->port_count) - return -EINVAL; - - rocker_port = rocker->ports[port_number]; - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - return 0; - - return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags); -} - -static int rocker_event_process(const struct rocker *rocker, - const struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; - const struct rocker_tlv *info; - u16 type; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); - if (!attrs[ROCKER_TLV_EVENT_TYPE] || - !attrs[ROCKER_TLV_EVENT_INFO]) - return -EIO; - - type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); - info = attrs[ROCKER_TLV_EVENT_INFO]; - - switch (type) { - case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: - return rocker_event_link_change(rocker, info); - case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: - return rocker_event_mac_vlan_seen(rocker, info); - } - - return -EOPNOTSUPP; -} - -static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) -{ - struct rocker *rocker = dev_id; - const struct pci_dev *pdev = rocker->pdev; - const struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { - err = rocker_desc_err(desc_info); - if (err) { - dev_err(&pdev->dev, "event desc received with err %d\n", - err); - } else { - err = rocker_event_process(rocker, desc_info); - if (err) - dev_err(&pdev->dev, "event processing failed with err %d\n", - err); - } - rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); - credits++; - } - rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); - - return IRQ_HANDLED; -} - -static irqreturn_t 
rocker_tx_irq_handler(int irq, void *dev_id) -{ - struct rocker_port *rocker_port = dev_id; - - napi_schedule(&rocker_port->napi_tx); - return IRQ_HANDLED; -} - -static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) -{ - struct rocker_port *rocker_port = dev_id; - - napi_schedule(&rocker_port->napi_rx); - return IRQ_HANDLED; -} - -/******************** - * Command interface - ********************/ - -typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv); - -typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv); - -static int rocker_cmd_exec(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - rocker_cmd_prep_cb_t prepare, void *prepare_priv, - rocker_cmd_proc_cb_t process, void *process_priv) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_wait *wait; - bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT); - unsigned long lock_flags; - int err; - - wait = rocker_wait_create(rocker_port, trans, flags); - if (!wait) - return -ENOMEM; - wait->nowait = nowait; - - spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); - - desc_info = rocker_desc_head_get(&rocker->cmd_ring); - if (!desc_info) { - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - err = -EAGAIN; - goto out; - } - - err = prepare(rocker_port, desc_info, prepare_priv); - if (err) { - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - goto out; - } - - rocker_desc_cookie_ptr_set(desc_info, wait); - - if (!switchdev_trans_ph_prepare(trans)) - rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); - - spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - - if (nowait) - return 0; - - if (!switchdev_trans_ph_prepare(trans)) - if (!rocker_wait_event_timeout(wait, HZ / 10)) - return -EIO; - - err = rocker_desc_err(desc_info); - if (err) - return err; - - if (process) - err = process(rocker_port, desc_info, process_priv); - - rocker_desc_gen_clear(desc_info); -out: - rocker_wait_destroy(trans, wait); - return err; -} - -static int -rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - struct ethtool_cmd *ecmd = priv; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - u32 speed; - u8 duplex; - u8 autoneg; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || - !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || - !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) - return -EIO; - - speed = 
rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); - duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); - autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); - - ecmd->transceiver = XCVR_INTERNAL; - ecmd->supported = SUPPORTED_TP; - ecmd->phy_address = 0xff; - ecmd->port = PORT_TP; - ethtool_cmd_speed_set(ecmd, speed); - ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; - ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; - - return 0; -} - -static int -rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - unsigned char *macaddr = priv; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - const struct rocker_tlv *attr; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; - if (!attr) - return -EIO; - - if (rocker_tlv_len(attr) != ETH_ALEN) - return -EINVAL; - - ether_addr_copy(macaddr, rocker_tlv_data(attr)); - return 0; -} - -struct port_name { - char *buf; - size_t len; -}; - -static int -rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - struct port_name *name = priv; - const struct rocker_tlv *attr; - size_t i, j, len; - const char *str; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; - if (!attr) - return -EIO; - - len = min_t(size_t, rocker_tlv_len(attr), name->len); - str = rocker_tlv_data(attr); - - /* make sure name only contains alphanumeric characters */ - for (i = j = 0; i < len; ++i) { - if (isalnum(str[i])) { - name->buf[j] = str[i]; - j++; - } - } - - if (j == 0) - return -EIO; - - name->buf[j] = '\0'; - - return 0; -} - -static int -rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct ethtool_cmd *ecmd = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, - ethtool_cmd_speed(ecmd))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, - ecmd->duplex)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, - ecmd->autoneg)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const unsigned char *macaddr = priv; - 
struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, - ETH_ALEN, macaddr)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - int mtu = *(int *)priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, - mtu)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int -rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, - !!(rocker_port->brport_flags & BR_LEARNING))) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - return 0; -} - -static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_ethtool_proc, - ecmd); -} - -static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, - unsigned char *macaddr) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_macaddr_proc, - macaddr); -} - -static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, - struct ethtool_cmd *ecmd) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_ethtool_prep, - ecmd, NULL, NULL); -} - -static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, - unsigned char *macaddr) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_macaddr_prep, - macaddr, NULL, NULL); -} - -static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, - int mtu) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_set_port_settings_mtu_prep, - &mtu, NULL, NULL); -} - -static int rocker_port_set_learning(struct rocker_port *rocker_port, - struct switchdev_trans *trans) -{ - return rocker_cmd_exec(rocker_port, trans, 0, - rocker_cmd_set_port_learning_prep, - NULL, NULL, NULL); -} - -static int -rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.ig_port.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.ig_port.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ig_port.goto_tbl)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.vlan.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.vlan.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.vlan.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.vlan.goto_tbl)) - return -EMSGSIZE; - if (entry->key.vlan.untagged && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, - entry->key.vlan.new_vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.term_mac.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.term_mac.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.term_mac.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.term_mac.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.term_mac.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.term_mac.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.term_mac.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.term_mac.goto_tbl)) - return -EMSGSIZE; - if (entry->key.term_mac.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.term_mac.copy_to_cpu)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.ucast_routing.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, - entry->key.ucast_routing.dst4)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, - entry->key.ucast_routing.dst4_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ucast_routing.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.ucast_routing.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (entry->key.bridge.has_eth_dst && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.bridge.eth_dst)) - return -EMSGSIZE; - if (entry->key.bridge.has_eth_dst_mask && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 
- ETH_ALEN, entry->key.bridge.eth_dst_mask)) - return -EMSGSIZE; - if (entry->key.bridge.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.bridge.vlan_id)) - return -EMSGSIZE; - if (entry->key.bridge.tunnel_id && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, - entry->key.bridge.tunnel_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.bridge.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.bridge.group_id)) - return -EMSGSIZE; - if (entry->key.bridge.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.bridge.copy_to_cpu)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.acl.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.acl.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->key.acl.eth_src)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_src_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.acl.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.acl.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.acl.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.acl.vlan_id_mask)) - return -EMSGSIZE; - - switch (ntohs(entry->key.acl.eth_type)) { - case ETH_P_IP: - case ETH_P_IPV6: - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, - entry->key.acl.ip_proto)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, - entry->key.acl.ip_proto_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, - entry->key.acl.ip_tos & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, - entry->key.acl.ip_tos_mask & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, - (entry->key.acl.ip_tos & 0xc0) >> 6)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_ECN_MASK, - (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) - return -EMSGSIZE; - break; - } - - if (entry->key.acl.group_id != ROCKER_GROUP_NONE && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.acl.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, - entry->key.tbl_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_PRIORITY, - entry->key.priority)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - - switch (entry->key.tbl_id) { - case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: - err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_VLAN: - err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: - err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: - err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_BRIDGING: - err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: - err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, - struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, - ROCKER_GROUP_PORT_GET(entry->group_id))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, - entry->l2_interface.pop_vlan)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l2_rewrite.group_id)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l2_rewrite.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l2_rewrite.eth_dst)) - return -EMSGSIZE; - if (entry->l2_rewrite.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l2_rewrite.vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - int i; - struct rocker_tlv *group_ids; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, - entry->group_count)) - return -EMSGSIZE; - - group_ids = rocker_tlv_nest_start(desc_info, - ROCKER_TLV_OF_DPA_GROUP_IDS); - if (!group_ids) - return -EMSGSIZE; - - for (i = 0; i < entry->group_count; i++) - /* Note TLV array is 1-based */ - if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, group_ids); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, - const struct 
rocker_group_tbl_entry *entry) -{ - if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l3_unicast.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l3_unicast.eth_dst)) - return -EMSGSIZE; - if (entry->l3_unicast.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l3_unicast.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, - entry->l3_unicast.ttl_check)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l3_unicast.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: - err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: - err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: - err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -/*************************************************** - * Flow, group, FDB, internal VLAN and neigh tables - ***************************************************/ - -static int rocker_init_tbls(struct rocker *rocker) -{ - hash_init(rocker->flow_tbl); - spin_lock_init(&rocker->flow_tbl_lock); - - hash_init(rocker->group_tbl); - spin_lock_init(&rocker->group_tbl_lock); - - hash_init(rocker->fdb_tbl); - spin_lock_init(&rocker->fdb_tbl_lock); - - hash_init(rocker->internal_vlan_tbl); - spin_lock_init(&rocker->internal_vlan_tbl_lock); - - hash_init(rocker->neigh_tbl); - spin_lock_init(&rocker->neigh_tbl_lock); - - return 0; -} - -static void rocker_free_tbls(struct rocker *rocker) -{ - unsigned long flags; - struct rocker_flow_tbl_entry *flow_entry; - struct rocker_group_tbl_entry *group_entry; - struct rocker_fdb_tbl_entry *fdb_entry; - struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; - struct rocker_neigh_tbl_entry *neigh_entry; - struct hlist_node *tmp; - 
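/* A minimal sketch of the descriptor-encoding idiom used by the
 * rocker_cmd_*_tbl_add/del callbacks above: every attribute put is
 * checked for -EMSGSIZE so an over-full descriptor fails cleanly, and
 * nested attributes are bracketed by rocker_tlv_nest_start()/
 * rocker_tlv_nest_end().  example_cmd_encode() itself is hypothetical;
 * the helpers and TLV types it uses are the ones above.
 */
static int example_cmd_encode(struct rocker_desc_info *desc_info, u64 cookie)
{
	struct rocker_tlv *cmd_info;

	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL))
		return -EMSGSIZE;
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;
	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, cookie))
		return -EMSGSIZE;
	rocker_tlv_nest_end(desc_info, cmd_info);

	return 0;
}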
int bkt; - - spin_lock_irqsave(&rocker->flow_tbl_lock, flags); - hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) - hash_del(&flow_entry->entry); - spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - - spin_lock_irqsave(&rocker->group_tbl_lock, flags); - hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) - hash_del(&group_entry->entry); - spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) - hash_del(&fdb_entry->entry); - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); - hash_for_each_safe(rocker->internal_vlan_tbl, bkt, - tmp, internal_vlan_entry, entry) - hash_del(&internal_vlan_entry->entry); - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); - - spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); - hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) - hash_del(&neigh_entry->entry); - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); -} - -static struct rocker_flow_tbl_entry * -rocker_flow_tbl_find(const struct rocker *rocker, - const struct rocker_flow_tbl_entry *match) -{ - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - - hash_for_each_possible(rocker->flow_tbl, found, - entry, match->key_crc32) { - if (memcmp(&found->key, &match->key, key_len) == 0) - return found; - } - - return NULL; -} - -static int rocker_flow_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - unsigned long lock_flags; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - match->cookie = found->cookie; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_kfree(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; - } else { - found = match; - found->cookie = rocker->flow_tbl_next_cookie++; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_add, found, NULL, NULL); -} - -static int rocker_flow_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); - unsigned long lock_flags; - int err = 0; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; - } - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - rocker_kfree(trans, match); - - if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_del, - found, NULL, NULL); - rocker_kfree(trans, found); - } - - return err; -} - -static int rocker_flow_tbl_do(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_flow_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_flow_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - enum rocker_of_dpa_table_id goto_tbl) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_IG_PORT; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_pport = in_pport; - entry->key.ig_port.in_pport_mask = in_pport_mask; - entry->key.ig_port.goto_tbl = goto_tbl; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, __be16 vlan_id, - __be16 vlan_id_mask, - enum rocker_of_dpa_table_id goto_tbl, - bool untagged, __be16 new_vlan_id) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_VLAN; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_pport = in_pport; - entry->key.vlan.vlan_id = vlan_id; - entry->key.vlan.vlan_id_mask = vlan_id_mask; - entry->key.vlan.goto_tbl = goto_tbl; - - entry->key.vlan.untagged = untagged; - entry->key.vlan.new_vlan_id = new_vlan_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 in_pport, u32 in_pport_mask, - __be16 eth_type, const u8 *eth_dst, - const u8 *eth_dst_mask, __be16 vlan_id, - __be16 vlan_id_mask, bool copy_to_cpu, - int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - if (is_multicast_ether_addr(eth_dst)) { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; - } else { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - } - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_pport = in_pport; - entry->key.term_mac.in_pport_mask = in_pport_mask; - entry->key.term_mac.eth_type = eth_type; - ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); - ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); - entry->key.term_mac.vlan_id = vlan_id; - entry->key.term_mac.vlan_id_mask = vlan_id_mask; 
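/* A minimal sketch of how the rocker_flow_tbl_*() builders above are
 * driven: the caller fills in only the key fields for one OF-DPA table
 * and rocker_flow_tbl_do() turns the same call into an add or a delete
 * depending on ROCKER_OP_FLAG_REMOVE.  example_port_untagged_vlan() is
 * hypothetical; it is in the spirit of what rocker_port_vlan() below
 * does for untagged traffic.
 */
static int example_port_untagged_vlan(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 internal_vlan_id)
{
	/* Untagged frames (VID 0, exact mask) on this port are assigned
	 * the internal VLAN before hitting the termination-MAC table.
	 */
	return rocker_flow_tbl_vlan(rocker_port, trans, flags,
				    rocker_port->pport,
				    htons(0), htons(0xffff),
				    ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC,
				    true, internal_vlan_id);
}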
- entry->key.term_mac.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 vlan_id, u32 tunnel_id, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, bool copy_to_cpu) -{ - struct rocker_flow_tbl_entry *entry; - u32 priority; - bool vlan_bridging = !!vlan_id; - bool dflt = !eth_dst || (eth_dst && eth_dst_mask); - bool wild = false; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; - - if (eth_dst) { - entry->key.bridge.has_eth_dst = 1; - ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); - } - if (eth_dst_mask) { - entry->key.bridge.has_eth_dst_mask = 1; - ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); - if (!ether_addr_equal(eth_dst_mask, ff_mac)) - wild = true; - } - - priority = ROCKER_PRIORITY_UNKNOWN; - if (vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; - else if (vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; - else if (vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_VLAN; - else if (!vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; - else if (!vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; - else if (!vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_TENANT; - - entry->key.priority = priority; - entry->key.bridge.vlan_id = vlan_id; - entry->key.bridge.tunnel_id = tunnel_id; - entry->key.bridge.goto_tbl = goto_tbl; - entry->key.bridge.group_id = group_id; - entry->key.bridge.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be16 eth_type, __be32 dst, - __be32 dst_mask, u32 priority, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - entry->key.priority = priority; - entry->key.ucast_routing.eth_type = eth_type; - entry->key.ucast_routing.dst4 = dst; - entry->key.ucast_routing.dst4_mask = dst_mask; - entry->key.ucast_routing.goto_tbl = goto_tbl; - entry->key.ucast_routing.group_id = group_id; - entry->key_len = offsetof(struct rocker_flow_tbl_key, - ucast_routing.group_id); - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - const u8 *eth_src, const u8 *eth_src_mask, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 eth_type, __be16 vlan_id, - __be16 vlan_id_mask, u8 ip_proto, - u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, - u32 group_id) -{ - u32 priority; - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - priority = ROCKER_PRIORITY_ACL_NORMAL; - if (eth_dst && eth_dst_mask) { - if (ether_addr_equal(eth_dst_mask, mcast_mac)) - priority = ROCKER_PRIORITY_ACL_DFLT; - else if (is_link_local_ether_addr(eth_dst)) - priority = ROCKER_PRIORITY_ACL_CTRL; - } - - 
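/* A minimal sketch of the two ways rocker_flow_tbl_bridge() above is
 * used: passing a unicast eth_dst with a NULL mask creates an
 * exact-match (FDB-style) entry, while passing no eth_dst at all
 * creates the per-VLAN default entry that floods and copies to the
 * CPU.  Both example_* wrappers are hypothetical and simply forward to
 * the real helper; the priority is derived inside it from the
 * dflt/wild tests.
 */
static int example_bridge_fdb_entry(struct rocker_port *rocker_port,
				    struct switchdev_trans *trans, int flags,
				    const u8 *mac, __be16 vlan_id, u32 group_id)
{
	return rocker_flow_tbl_bridge(rocker_port, trans, flags, mac, NULL,
				      vlan_id, 0,
				      ROCKER_OF_DPA_TABLE_ID_ACL_POLICY,
				      group_id, false);
}

static int example_bridge_flood_entry(struct rocker_port *rocker_port,
				      struct switchdev_trans *trans, int flags,
				      __be16 vlan_id, u32 flood_group_id)
{
	return rocker_flow_tbl_bridge(rocker_port, trans, flags, NULL, NULL,
				      vlan_id, 0,
				      ROCKER_OF_DPA_TABLE_ID_ACL_POLICY,
				      flood_group_id, true);
}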
entry->key.priority = priority; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_pport = in_pport; - entry->key.acl.in_pport_mask = in_pport_mask; - - if (eth_src) - ether_addr_copy(entry->key.acl.eth_src, eth_src); - if (eth_src_mask) - ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); - if (eth_dst) - ether_addr_copy(entry->key.acl.eth_dst, eth_dst); - if (eth_dst_mask) - ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); - - entry->key.acl.eth_type = eth_type; - entry->key.acl.vlan_id = vlan_id; - entry->key.acl.vlan_id_mask = vlan_id_mask; - entry->key.acl.ip_proto = ip_proto; - entry->key.acl.ip_proto_mask = ip_proto_mask; - entry->key.acl.ip_tos = ip_tos; - entry->key.acl.ip_tos_mask = ip_tos_mask; - entry->key.acl.group_id = group_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_group_tbl_entry * -rocker_group_tbl_find(const struct rocker *rocker, - const struct rocker_group_tbl_entry *match) -{ - struct rocker_group_tbl_entry *found; - - hash_for_each_possible(rocker->group_tbl, found, - entry, match->group_id) { - if (found->group_id == match->group_id) - return found; - } - - return NULL; -} - -static void rocker_group_tbl_entry_free(struct switchdev_trans *trans, - struct rocker_group_tbl_entry *entry) -{ - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - rocker_kfree(trans, entry->group_ids); - break; - default: - break; - } - rocker_kfree(trans, entry); -} - -static int rocker_group_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_group_tbl_entry_free(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; - } else { - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->group_tbl, &found->entry, found->group_id); - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_add, found, NULL, NULL); -} - -static int rocker_group_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - int err = 0; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; - } - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - rocker_group_tbl_entry_free(trans, match); - - if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_del, - found, NULL, NULL); - rocker_group_tbl_entry_free(trans, found); - } - - return err; -} - -static int rocker_group_tbl_do(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return 
rocker_group_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_group_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_interface(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u32 out_pport, - int pop_vlan) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - entry->l2_interface.pop_vlan = pop_vlan; - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = group_id; - entry->group_count = group_count; - - entry->group_ids = rocker_kcalloc(trans, flags, - group_count, sizeof(u32)); - if (!entry->group_ids) { - rocker_kfree(trans, entry); - return -ENOMEM; - } - memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_flood(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - return rocker_group_l2_fan_out(rocker_port, trans, flags, - group_count, group_ids, - group_id); -} - -static int rocker_group_l3_unicast(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 index, const u8 *src_mac, const u8 *dst_mac, - __be16 vlan_id, bool ttl_check, u32 pport) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L3_UNICAST(index); - if (src_mac) - ether_addr_copy(entry->l3_unicast.eth_src, src_mac); - if (dst_mac) - ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); - entry->l3_unicast.vlan_id = vlan_id; - entry->l3_unicast.ttl_check = ttl_check; - entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_neigh_tbl_entry * -rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr) -{ - struct rocker_neigh_tbl_entry *found; - - hash_for_each_possible(rocker->neigh_tbl, found, - entry, be32_to_cpu(ip_addr)) - if (found->ip_addr == ip_addr) - return found; - - return NULL; -} - -static void _rocker_neigh_add(struct rocker *rocker, - struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (!switchdev_trans_ph_commit(trans)) - entry->index = rocker->neigh_tbl_next_index++; - if (switchdev_trans_ph_prepare(trans)) - return; - entry->ref_count++; - hash_add(rocker->neigh_tbl, &entry->entry, - be32_to_cpu(entry->ip_addr)); -} - -static void _rocker_neigh_del(struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (switchdev_trans_ph_prepare(trans)) - return; - if (--entry->ref_count == 0) { - hash_del(&entry->entry); - rocker_kfree(trans, entry); - } -} - -static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry, - struct switchdev_trans *trans, - const u8 *eth_dst, bool ttl_check) -{ - if (eth_dst) { - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = ttl_check; - } else if 
(!switchdev_trans_ph_prepare(trans)) { - entry->ref_count++; - } -} - -static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be32 ip_addr, const u8 *eth_dst) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - __be16 eth_type = htons(ETH_P_IP); - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - u32 priority = 0; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - int err = 0; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = true; - _rocker_neigh_add(rocker, trans, entry); - } else if (removing) { - memcpy(entry, found, sizeof(*entry)); - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, eth_dst, true); - memcpy(entry, found, sizeof(*entry)); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (err) - goto err_out; - - /* For each active neighbor, we have an L3 unicast group and - * a /32 route to the neighbor, which uses the L3 unicast - * group. The L3 unicast group can also be referred to by - * other routes' nexthops. - */ - - err = rocker_group_l3_unicast(rocker_port, trans, flags, - entry->index, - rocker_port->dev->dev_addr, - entry->eth_dst, - rocker_port->internal_vlan_id, - entry->ttl_check, - rocker_port->pport); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) L3 unicast group index %d\n", - err, entry->index); - goto err_out; - } - - if (adding || removing) { - group_id = ROCKER_GROUP_L3_UNICAST(entry->index); - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, - eth_type, ip_addr, - inet_make_mask(32), - priority, goto_tbl, - group_id, flags); - - if (err) - netdev_err(rocker_port->dev, - "Error (%d) /32 unicast route %pI4 group 0x%08x\n", - err, &entry->ip_addr, group_id); - } - -err_out: - if (!adding) - rocker_kfree(trans, entry); - - return err; -} - -static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be32 ip_addr) -{ - struct net_device *dev = rocker_port->dev; - struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); - int err = 0; - - if (!n) { - n = neigh_create(&arp_tbl, &ip_addr, dev); - if (IS_ERR(n)) - return IS_ERR(n); - } - - /* If the neigh is already resolved, then go ahead and - * install the entry, otherwise start the ARP process to - * resolve the neigh. 
- */ - - if (n->nud_state & NUD_VALID) - err = rocker_port_ipv4_neigh(rocker_port, trans, 0, - ip_addr, n->ha); - else - neigh_event_send(n, NULL); - - neigh_release(n); - return err; -} - -static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be32 ip_addr, u32 *index) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - bool resolved = true; - int err = 0; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - if (found) - *index = found->index; - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - _rocker_neigh_add(rocker, trans, entry); - *index = entry->index; - resolved = false; - } else if (removing) { - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, NULL, false); - resolved = !is_zero_ether_addr(found->eth_dst); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (!adding) - rocker_kfree(trans, entry); - - if (err) - return err; - - /* Resolved means neigh ip_addr is resolved to neigh mac. */ - - if (!resolved) - err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr); - - return err; -} - -static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be16 vlan_id) -{ - struct rocker_port *p; - const struct rocker *rocker = rocker_port->rocker; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 *group_ids; - u8 group_count = 0; - int err = 0; - int i; - - group_ids = rocker_kcalloc(trans, flags, - rocker->port_count, sizeof(u32)); - if (!group_ids) - return -ENOMEM; - - /* Adjust the flood group for this VLAN. The flood group - * references an L2 interface group for each port in this - * VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (!p) - continue; - if (!rocker_port_is_bridged(p)) - continue; - if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { - group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); - } - } - - /* If there are no bridged ports in this VLAN, we're done */ - if (group_count == 0) - goto no_ports_in_vlan; - - err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id, - group_count, group_ids, group_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - -no_ports_in_vlan: - rocker_kfree(trans, group_ids); - return err; -} - -static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, bool pop_vlan) -{ - const struct rocker *rocker = rocker_port->rocker; - struct rocker_port *p; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_pport; - int ref = 0; - int err; - int i; - - /* An L2 interface group for this port in this VLAN, but - * only when port STP state is LEARNING|FORWARDING. 
- */ - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) { - out_pport = rocker_port->pport; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - /* An L2 interface group for this VLAN to CPU port. - * Add when first port joins this VLAN and destroy when - * last port leaves this VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) - ref++; - } - - if ((!adding || ref != 1) && (adding || ref != 0)) - return 0; - - out_pport = 0; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for CPU port\n", err); - return err; - } - - return 0; -} - -static struct rocker_ctrl { - const u8 *eth_dst; - const u8 *eth_dst_mask; - __be16 eth_type; - bool acl; - bool bridge; - bool term; - bool copy_to_cpu; -} rocker_ctrls[] = { - [ROCKER_CTRL_LINK_LOCAL_MCAST] = { - /* pass link local multicast pkts up to CPU for filtering */ - .eth_dst = ll_mac, - .eth_dst_mask = ll_mask, - .acl = true, - }, - [ROCKER_CTRL_LOCAL_ARP] = { - /* pass local ARP pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .eth_type = htons(ETH_P_ARP), - .acl = true, - }, - [ROCKER_CTRL_IPV4_MCAST] = { - /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ - .eth_dst = ipv4_mcast, - .eth_dst_mask = ipv4_mask, - .eth_type = htons(ETH_P_IP), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_IPV6_MCAST] = { - /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ - .eth_dst = ipv6_mcast, - .eth_dst_mask = ipv6_mask, - .eth_type = htons(ETH_P_IPV6), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_BRIDGING] = { - /* flood any pkts on vlan */ - .bridge = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_OVS] = { - /* pass all pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .acl = true, - }, -}; - -static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport = rocker_port->pport; - u32 in_pport_mask = 0xffffffff; - u32 out_pport = 0; - const u8 *eth_src = NULL; - const u8 *eth_src_mask = NULL; - __be16 vlan_id_mask = htons(0xffff); - u8 ip_proto = 0; - u8 ip_proto_mask = 0; - u8 ip_tos = 0; - u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - int err; - - err = rocker_flow_tbl_acl(rocker_port, trans, flags, - in_pport, in_pport_mask, - eth_src, eth_src_mask, - ctrl->eth_dst, ctrl->eth_dst_mask, - ctrl->eth_type, - vlan_id, vlan_id_mask, - ip_proto, ip_proto_mask, - ip_tos, ip_tos_mask, - group_id); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, - const struct rocker_ctrl *ctrl, - __be16 vlan_id) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 tunnel_id = 0; - int err; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, - ctrl->eth_dst, ctrl->eth_dst_mask, - vlan_id, tunnel_id, - goto_tbl, 
group_id, ctrl->copy_to_cpu); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 vlan_id_mask = htons(0xffff); - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - ctrl->eth_type, ctrl->eth_dst, - ctrl->eth_dst_mask, vlan_id, - vlan_id_mask, ctrl->copy_to_cpu, - flags); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - if (ctrl->acl) - return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags, - ctrl, vlan_id); - if (ctrl->bridge) - return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags, - ctrl, vlan_id); - - if (ctrl->term) - return rocker_port_ctrl_vlan_term(rocker_port, trans, flags, - ctrl, vlan_id); - - return -EOPNOTSUPP; -} - -static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - int err = 0; - int i; - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (rocker_port->ctrls[i]) { - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - &rocker_ctrls[i], vlan_id); - if (err) - return err; - } - } - - return err; -} - -static int rocker_port_ctrl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - ctrl, htons(vid)); - if (err) - break; - } - - return err; -} - -static int rocker_port_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, u16 vid) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_pport = rocker_port->pport; - __be16 vlan_id = htons(vid); - __be16 vlan_id_mask = htons(0xffff); - __be16 internal_vlan_id; - bool untagged; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - int err; - - internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); - - if (adding && test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already added */ - else if (!adding && !test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already removed */ - - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - if (adding) { - err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port ctrl vlan add\n", err); - goto err_out; - } - } - - err = rocker_port_vlan_l2_groups(rocker_port, trans, flags, - internal_vlan_id, untagged); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 groups\n", err); - goto err_out; - } - - err = rocker_port_vlan_flood_group(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - goto err_out; - } - - err = rocker_flow_tbl_vlan(rocker_port, trans, flags, - in_pport, vlan_id, vlan_id_mask, - goto_tbl, untagged, 
internal_vlan_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN table\n", err); - -err_out: - if (switchdev_trans_ph_prepare(trans)) - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - return err; -} - -static int rocker_port_ig_tbl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - enum rocker_of_dpa_table_id goto_tbl; - u32 in_pport; - u32 in_pport_mask; - int err; - - /* Normal Ethernet Frames. Matches pkts from any local physical - * ports. Goto VLAN tbl. - */ - - in_pport = 0; - in_pport_mask = 0xffff0000; - goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; - - err = rocker_flow_tbl_ig_port(rocker_port, trans, flags, - in_pport, in_pport_mask, - goto_tbl); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) ingress port table entry\n", err); - - return err; -} - -struct rocker_fdb_learn_work { - struct work_struct work; - struct rocker_port *rocker_port; - struct switchdev_trans *trans; - int flags; - u8 addr[ETH_ALEN]; - u16 vid; -}; - -static void rocker_port_fdb_learn_work(struct work_struct *work) -{ - const struct rocker_fdb_learn_work *lw = - container_of(work, struct rocker_fdb_learn_work, work); - bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); - bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); - struct switchdev_notifier_fdb_info info; - - info.addr = lw->addr; - info.vid = lw->vid; - - rtnl_lock(); - if (learned && removing) - call_switchdev_notifiers(SWITCHDEV_FDB_DEL, - lw->rocker_port->dev, &info.info); - else if (learned && !removing) - call_switchdev_notifiers(SWITCHDEV_FDB_ADD, - lw->rocker_port->dev, &info.info); - rtnl_unlock(); - - rocker_kfree(lw->trans, work); -} - -static int rocker_port_fdb_learn(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *addr, __be16 vlan_id) -{ - struct rocker_fdb_learn_work *lw; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_pport = rocker_port->pport; - u32 tunnel_id = 0; - u32 group_id = ROCKER_GROUP_NONE; - bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); - bool copy_to_cpu = false; - int err; - - if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - - if (!(flags & ROCKER_OP_FLAG_REFRESH)) { - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr, - NULL, vlan_id, tunnel_id, goto_tbl, - group_id, copy_to_cpu); - if (err) - return err; - } - - if (!syncing) - return 0; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - lw = rocker_kzalloc(trans, flags, sizeof(*lw)); - if (!lw) - return -ENOMEM; - - INIT_WORK(&lw->work, rocker_port_fdb_learn_work); - - lw->rocker_port = rocker_port; - lw->trans = trans; - lw->flags = flags; - ether_addr_copy(lw->addr, addr); - lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); - - if (switchdev_trans_ph_prepare(trans)) - rocker_kfree(trans, lw); - else - schedule_work(&lw->work); - - return 0; -} - -static struct rocker_fdb_tbl_entry * -rocker_fdb_tbl_find(const struct rocker *rocker, - const struct rocker_fdb_tbl_entry *match) -{ - struct rocker_fdb_tbl_entry *found; - - hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) - if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) - return found; - - return NULL; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct 
rocker_fdb_tbl_entry *fdb; - struct rocker_fdb_tbl_entry *found; - bool removing = (flags & ROCKER_OP_FLAG_REMOVE); - unsigned long lock_flags; - - fdb = rocker_kzalloc(trans, flags, sizeof(*fdb)); - if (!fdb) - return -ENOMEM; - - fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->touched = jiffies; - fdb->key.rocker_port = rocker_port; - ether_addr_copy(fdb->key.addr, addr); - fdb->key.vlan_id = vlan_id; - fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - found = rocker_fdb_tbl_find(rocker, fdb); - - if (found) { - found->touched = jiffies; - if (removing) { - rocker_kfree(trans, fdb); - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - } else if (!removing) { - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->fdb_tbl, &fdb->entry, - fdb->key_crc32); - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - /* Check if adding and already exists, or removing and can't find */ - if (!found != !removing) { - rocker_kfree(trans, fdb); - if (!found && removing) - return 0; - /* Refreshing existing to update aging timers */ - flags |= ROCKER_OP_FLAG_REFRESH; - } - - return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id); -} - -static int rocker_port_fdb_flush(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - unsigned long lock_flags; - struct hlist_node *tmp; - int bkt; - int err = 0; - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) - return 0; - - flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - if (!found->learned) - continue; - err = rocker_port_fdb_learn(rocker_port, trans, flags, - found->key.addr, - found->key.vlan_id); - if (err) - goto err_out; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - -err_out: - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static void rocker_fdb_cleanup(unsigned long data) -{ - struct rocker *rocker = (struct rocker *)data; - struct rocker_port *rocker_port; - struct rocker_fdb_tbl_entry *entry; - struct hlist_node *tmp; - unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; - unsigned long expires; - unsigned long lock_flags; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE | - ROCKER_OP_FLAG_LEARNED; - int bkt; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) { - if (!entry->learned) - continue; - rocker_port = entry->key.rocker_port; - expires = entry->touched + rocker_port->ageing_time; - if (time_before_eq(expires, jiffies)) { - rocker_port_fdb_learn(rocker_port, NULL, - flags, entry->key.addr, - entry->key.vlan_id); - hash_del(&entry->entry); - } else if (time_before(expires, next_timer)) { - next_timer = expires; - } - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer)); -} - -static int rocker_port_router_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 eth_type; - const u8 *dst_mac_mask = ff_mac; - __be16 vlan_id_mask = htons(0xffff); - 
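/* A minimal sketch of how the learning path above is exercised: when a
 * new source MAC/VLAN is seen, feeding the address into
 * rocker_port_fdb() with ROCKER_OP_FLAG_LEARNED both records it in
 * fdb_tbl and, if BR_LEARNING_SYNC is set on the port, notifies the
 * bridge through the deferred work item above; passing
 * ROCKER_OP_FLAG_REMOVE instead withdraws the entry again.
 * example_learn_src_mac() is hypothetical.
 */
static int example_learn_src_mac(struct rocker_port *rocker_port,
				 const unsigned char *src_mac, __be16 vlan_id)
{
	return rocker_port_fdb(rocker_port, NULL, src_mac, vlan_id,
			       ROCKER_OP_FLAG_LEARNED | ROCKER_OP_FLAG_NOWAIT);
}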
bool copy_to_cpu = false; - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - eth_type = htons(ETH_P_IP); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - if (err) - return err; - - eth_type = htons(ETH_P_IPV6); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - - return err; -} - -static int rocker_port_fwding(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - bool pop_vlan; - u32 out_pport; - __be16 vlan_id; - u16 vid; - int err; - - /* Port will be forwarding-enabled if its STP state is LEARNING - * or FORWARDING. Traffic from CPU can still egress, regardless of - * port STP state. Use L2 interface group on port VLANs as a way - * to toggle port forwarding: if forwarding is disabled, L2 - * interface group will not exist. - */ - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - flags |= ROCKER_OP_FLAG_REMOVE; - - out_pport = rocker_port->pport; - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan_id = htons(vid); - pop_vlan = rocker_vlan_id_is_internal(vlan_id); - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - return 0; -} - -static int rocker_port_stp_update(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u8 state) -{ - bool want[ROCKER_CTRL_MAX] = { 0, }; - bool prev_ctrls[ROCKER_CTRL_MAX]; - u8 uninitialized_var(prev_state); - int err; - int i; - - if (switchdev_trans_ph_prepare(trans)) { - memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls)); - prev_state = rocker_port->stp_state; - } - - if (rocker_port->stp_state == state) - return 0; - - rocker_port->stp_state = state; - - switch (state) { - case BR_STATE_DISABLED: - /* port is completely disabled */ - break; - case BR_STATE_LISTENING: - case BR_STATE_BLOCKING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - break; - case BR_STATE_LEARNING: - case BR_STATE_FORWARDING: - if (!rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - want[ROCKER_CTRL_IPV4_MCAST] = true; - want[ROCKER_CTRL_IPV6_MCAST] = true; - if (rocker_port_is_bridged(rocker_port)) - want[ROCKER_CTRL_DFLT_BRIDGING] = true; - else if (rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_DFLT_OVS] = true; - else - want[ROCKER_CTRL_LOCAL_ARP] = true; - break; - } - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (want[i] != rocker_port->ctrls[i]) { - int ctrl_flags = flags | - (want[i] ? 
0 : ROCKER_OP_FLAG_REMOVE); - err = rocker_port_ctrl(rocker_port, trans, ctrl_flags, - &rocker_ctrls[i]); - if (err) - goto err_out; - rocker_port->ctrls[i] = want[i]; - } - } - - err = rocker_port_fdb_flush(rocker_port, trans, flags); - if (err) - goto err_out; - - err = rocker_port_fwding(rocker_port, trans, flags); - -err_out: - if (switchdev_trans_ph_prepare(trans)) { - memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); - rocker_port->stp_state = prev_state; - } - - return err; -} - -static int rocker_port_fwd_enable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will enable port */ - return 0; - - /* port is not bridged, so simulate going to FORWARDING state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_FORWARDING); -} - -static int rocker_port_fwd_disable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will disable port */ - return 0; - - /* port is not bridged, so simulate going to DISABLED state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_DISABLED); -} - -static struct rocker_internal_vlan_tbl_entry * -rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex) -{ - struct rocker_internal_vlan_tbl_entry *found; - - hash_for_each_possible(rocker->internal_vlan_tbl, found, - entry, ifindex) { - if (found->ifindex == ifindex) - return found; - } - - return NULL; -} - -static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *entry; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - int i; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return 0; - - entry->ifindex = ifindex; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (found) { - kfree(entry); - goto found; - } - - found = entry; - hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); - - for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { - if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) - continue; - found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); - goto found; - } - - netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); - -found: - found->ref_count++; - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); - - return found->vlan_id; -} - -static void -rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - unsigned long bit; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (!found) { - netdev_err(rocker_port->dev, - "ifindex (%d) not found in internal VLAN tbl\n", - ifindex); - goto not_found; - } - - if (--found->ref_count <= 0) { - bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; - clear_bit(bit, rocker->internal_vlan_bitmap); - hash_del(&found->entry); - kfree(found); - } - -not_found: - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); -} - -static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, - struct switchdev_trans *trans, __be32 dst, - int dst_len, const struct fib_info *fi, - u32 
tb_id, int flags) -{ - const struct fib_nh *nh; - __be16 eth_type = htons(ETH_P_IP); - __be32 dst_mask = inet_make_mask(dst_len); - __be16 internal_vlan_id = rocker_port->internal_vlan_id; - u32 priority = fi->fib_priority; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - bool nh_on_port; - bool has_gw; - u32 index; - int err; - - /* XXX support ECMP */ - - nh = fi->fib_nh; - nh_on_port = (fi->fib_dev == rocker_port->dev); - has_gw = !!nh->nh_gw; - - if (has_gw && nh_on_port) { - err = rocker_port_ipv4_nh(rocker_port, trans, flags, - nh->nh_gw, &index); - if (err) - return err; - - group_id = ROCKER_GROUP_L3_UNICAST(index); - } else { - /* Send to CPU for processing */ - group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); - } - - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst, - dst_mask, priority, goto_tbl, - group_id, flags); - if (err) - netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", - err, &dst); - - return err; -} - -/***************** - * Net device ops - *****************/ - -static int rocker_port_open(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_dma_rings_init(rocker_port); - if (err) - return err; - - err = request_irq(rocker_msix_tx_vector(rocker_port), - rocker_tx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign tx irq\n"); - goto err_request_tx_irq; - } - - err = request_irq(rocker_msix_rx_vector(rocker_port), - rocker_rx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign rx irq\n"); - goto err_request_rx_irq; - } - - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - if (err) - goto err_fwd_enable; - - napi_enable(&rocker_port->napi_tx); - napi_enable(&rocker_port->napi_rx); - if (!dev->proto_down) - rocker_port_set_enable(rocker_port, true); - netif_start_queue(dev); - return 0; - -err_fwd_enable: - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); -err_request_rx_irq: - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); -err_request_tx_irq: - rocker_port_dma_rings_fini(rocker_port); - return err; -} - -static int rocker_port_stop(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - netif_stop_queue(dev); - rocker_port_set_enable(rocker_port, false); - napi_disable(&rocker_port->napi_rx); - napi_disable(&rocker_port->napi_tx); - rocker_port_fwd_disable(rocker_port, NULL, - ROCKER_OP_FLAG_NOWAIT); - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); - rocker_port_dma_rings_fini(rocker_port); - - return 0; -} - -static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; - struct rocker_tlv *attr; - int rem; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); - if (!attrs[ROCKER_TLV_TX_FRAGS]) - return; - rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { - const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; - dma_addr_t dma_handle; - size_t len; - - if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) - continue; - rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, - attr); - if 
(!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || - !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) - continue; - dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); - len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); - pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); - } -} - -static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - char *buf, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - struct rocker_tlv *frag; - - dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); - return -EIO; - } - frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); - if (!frag) - goto unmap_frag; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, - dma_handle)) - goto nest_cancel; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, - buf_len)) - goto nest_cancel; - rocker_tlv_nest_end(desc_info, frag); - return 0; - -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frag); -unmap_frag: - pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); - return -EMSGSIZE; -} - -static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_tlv *frags; - int i; - int err; - - desc_info = rocker_desc_head_get(&rocker_port->tx_ring); - if (unlikely(!desc_info)) { - if (net_ratelimit()) - netdev_err(dev, "tx ring full when queue awake\n"); - return NETDEV_TX_BUSY; - } - - rocker_desc_cookie_ptr_set(desc_info, skb); - - frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); - if (!frags) - goto out; - err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, - skb->data, skb_headlen(skb)); - if (err) - goto nest_cancel; - if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { - err = skb_linearize(skb); - if (err) - goto unmap_frags; - } - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - - err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, - skb_frag_address(frag), - skb_frag_size(frag)); - if (err) - goto unmap_frags; - } - rocker_tlv_nest_end(desc_info, frags); - - rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); - - desc_info = rocker_desc_head_get(&rocker_port->tx_ring); - if (!desc_info) - netif_stop_queue(dev); - - return NETDEV_TX_OK; - -unmap_frags: - rocker_tx_desc_frags_unmap(rocker_port, desc_info); -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frags); -out: - dev_kfree_skb(skb); - dev->stats.tx_dropped++; - - return NETDEV_TX_OK; -} - -static int rocker_port_set_mac_address(struct net_device *dev, void *p) -{ - struct sockaddr *addr = p; - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - if (!is_valid_ether_addr(addr->sa_data)) - return -EADDRNOTAVAIL; - - err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); - if (err) - return err; - memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - return 0; -} - -static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int running = netif_running(dev); - int err; - -#define 
ROCKER_PORT_MIN_MTU 68 -#define ROCKER_PORT_MAX_MTU 9000 - - if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) - return -EINVAL; - - if (running) - rocker_port_stop(dev); - - netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); - dev->mtu = new_mtu; - - err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); - if (err) - return err; - - if (running) - err = rocker_port_open(dev); - - return err; -} - -static int rocker_port_get_phys_port_name(struct net_device *dev, - char *buf, size_t len) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct port_name name = { .buf = buf, .len = len }; - int err; - - err = rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_settings_prep, NULL, - rocker_cmd_get_port_settings_phys_name_proc, - &name); - - return err ? -EOPNOTSUPP : 0; -} - -static int rocker_port_change_proto_down(struct net_device *dev, - bool proto_down) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_port->dev->flags & IFF_UP) - rocker_port_set_enable(rocker_port, !proto_down); - rocker_port->dev->proto_down = proto_down; - return 0; -} - -static void rocker_port_neigh_destroy(struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(n->dev); - int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - rocker_port_ipv4_neigh(rocker_port, NULL, - flags, ip_addr, n->ha); -} - -static const struct net_device_ops rocker_port_netdev_ops = { - .ndo_open = rocker_port_open, - .ndo_stop = rocker_port_stop, - .ndo_start_xmit = rocker_port_xmit, - .ndo_set_mac_address = rocker_port_set_mac_address, - .ndo_change_mtu = rocker_port_change_mtu, - .ndo_bridge_getlink = switchdev_port_bridge_getlink, - .ndo_bridge_setlink = switchdev_port_bridge_setlink, - .ndo_bridge_dellink = switchdev_port_bridge_dellink, - .ndo_fdb_add = switchdev_port_fdb_add, - .ndo_fdb_del = switchdev_port_fdb_del, - .ndo_fdb_dump = switchdev_port_fdb_dump, - .ndo_get_phys_port_name = rocker_port_get_phys_port_name, - .ndo_change_proto_down = rocker_port_change_proto_down, - .ndo_neigh_destroy = rocker_port_neigh_destroy, -}; - -/******************** - * swdev interface - ********************/ - -static int rocker_port_attr_get(struct net_device *dev, - struct switchdev_attr *attr) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - const struct rocker *rocker = rocker_port->rocker; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: - attr->u.ppid.id_len = sizeof(rocker->hw.id); - memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - attr->u.brport_flags = rocker_port->brport_flags; - break; - default: - return -EOPNOTSUPP; - } - - return 0; -} - -static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - unsigned long brport_flags) -{ - unsigned long orig_flags; - int err = 0; - - orig_flags = rocker_port->brport_flags; - rocker_port->brport_flags = brport_flags; - if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING) - err = rocker_port_set_learning(rocker_port, trans); - - if (switchdev_trans_ph_prepare(trans)) - rocker_port->brport_flags = orig_flags; - - return err; -} - -static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 ageing_time) -{ - if (!switchdev_trans_ph_prepare(trans)) { - rocker_port->ageing_time = clock_t_to_jiffies(ageing_time); - 
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies); - } - - return 0; -} - -static int rocker_port_attr_set(struct net_device *dev, - const struct switchdev_attr *attr, - struct switchdev_trans *trans) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err = 0; - - switch (attr->id) { - case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = rocker_port_stp_update(rocker_port, trans, 0, - attr->u.stp_state); - break; - case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = rocker_port_brport_flags_set(rocker_port, trans, - attr->u.brport_flags); - break; - case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = rocker_port_bridge_ageing_time(rocker_port, trans, - attr->u.ageing_time); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u16 vid, u16 flags) -{ - int err; - - /* XXX deal with flags for PVID and untagged */ - - err = rocker_port_vlan(rocker_port, trans, 0, vid); - if (err) - return err; - - err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); - if (err) - rocker_port_vlan(rocker_port, trans, - ROCKER_OP_FLAG_REMOVE, vid); - - return err; -} - -static int rocker_port_vlans_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_add(rocker_port, trans, - vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int flags = 0; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - -static int rocker_port_obj_add(struct net_device *dev, - const struct switchdev_obj *obj, - struct switchdev_trans *trans) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, trans, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); - break; - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_FDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_vlan_del(struct rocker_port *rocker_port, - u16 vid, u16 flags) -{ - int err; - - err = rocker_port_router_mac(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, htons(vid)); - if (err) - return err; - - return rocker_port_vlan(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, vid); -} - -static int rocker_port_vlans_del(struct rocker_port *rocker_port, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_del(rocker_port, vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int 
flags = ROCKER_OP_FLAG_REMOVE; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - -static int rocker_port_obj_del(struct net_device *dev, - const struct switchdev_obj *obj) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_del(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - break; - case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, NULL, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, - ROCKER_OP_FLAG_REMOVE); - break; - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_del(rocker_port, NULL, - SWITCHDEV_OBJ_PORT_FDB(obj)); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - struct hlist_node *tmp; - unsigned long lock_flags; - int bkt; - int err = 0; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - ether_addr_copy(fdb->addr, found->key.addr); - fdb->ndm_state = NUD_REACHABLE; - fdb->vid = rocker_port_vlan_to_vid(rocker_port, - found->key.vlan_id); - err = cb(&fdb->obj); - if (err) - break; - } - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static int rocker_port_vlan_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan->flags = 0; - if (rocker_vlan_id_is_internal(htons(vid))) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - vlan->vid_begin = vlan->vid_end = vid; - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - -static int rocker_port_obj_dump(struct net_device *dev, - struct switchdev_obj *obj, - switchdev_obj_dump_cb_t *cb) -{ - const struct rocker_port *rocker_port = netdev_priv(dev); - int err = 0; - - switch (obj->id) { - case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_dump(rocker_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); - break; - case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlan_dump(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj), cb); - break; - default: - err = -EOPNOTSUPP; - break; - } - - return err; -} - -static const struct switchdev_ops rocker_port_switchdev_ops = { - .switchdev_port_attr_get = rocker_port_attr_get, - .switchdev_port_attr_set = rocker_port_attr_set, - .switchdev_port_obj_add = rocker_port_obj_add, - .switchdev_port_obj_del = rocker_port_obj_del, - .switchdev_port_obj_dump = rocker_port_obj_dump, -}; - -/******************** - * ethtool interface - ********************/ - -static int rocker_port_get_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); -} - -static int rocker_port_set_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd); -} - 
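The switchdev attribute and object handlers above all take a struct switchdev_trans and run twice per operation, once in the prepare phase and once in the commit phase, which is why rocker_port_stp_update and rocker_port_brport_flags_set save the previous software state and restore it when switchdev_trans_ph_prepare() is true. A minimal sketch of that two-phase pattern follows; the function name example_port_stp_set is hypothetical and not part of this patch:

static int example_port_stp_set(struct rocker_port *rocker_port,
				struct switchdev_trans *trans, u8 stp_state)
{
	int prev_state = rocker_port->stp_state;

	/* Both phases execute the same code path. */
	rocker_port->stp_state = stp_state;

	/* ... flow/group table programming would go here ... */

	if (switchdev_trans_ph_prepare(trans)) {
		/* Prepare phase: only report whether the change could
		 * succeed, then restore the software state so the commit
		 * phase starts from the original.
		 */
		rocker_port->stp_state = prev_state;
	}

	return 0;
}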
-static void rocker_port_get_drvinfo(struct net_device *dev, - struct ethtool_drvinfo *drvinfo) -{ - strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); - strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); -} - -static struct rocker_port_stats { - char str[ETH_GSTRING_LEN]; - int type; -} rocker_port_stats[] = { - { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, - { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, - { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, - { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, - - { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, - { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, - { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, - { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, -}; - -#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) - -static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, - u8 *data) -{ - u8 *p = data; - int i; - - switch (stringset) { - case ETH_SS_STATS: - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { - memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); - p += ETH_GSTRING_LEN; - } - break; - } -} - -static int -rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_tlv *cmd_stats; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, - ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) - return -EMSGSIZE; - - cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_stats) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, - rocker_port->pport)) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, cmd_stats); - - return 0; -} - -static int -rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; - const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; - const struct rocker_tlv *pattr; - u32 pport; - u64 *data = priv; - int i; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); - - if (!attrs[ROCKER_TLV_CMD_INFO]) - return -EIO; - - rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, - attrs[ROCKER_TLV_CMD_INFO]); - - if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) - return -EIO; - - pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); - if (pport != rocker_port->pport) - return -EIO; - - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { - pattr = stats_attrs[rocker_port_stats[i].type]; - if (!pattr) - continue; - - data[i] = rocker_tlv_get_u64(pattr); - } - - return 0; -} - -static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, - void *priv) -{ - return rocker_cmd_exec(rocker_port, NULL, 0, - rocker_cmd_get_port_stats_prep, NULL, - rocker_cmd_get_port_stats_ethtool_proc, - priv); -} - -static void rocker_port_get_stats(struct net_device *dev, - struct ethtool_stats *stats, u64 *data) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { - int i; - - for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) - data[i] = 0; - } -} - -static int rocker_port_get_sset_count(struct net_device *netdev, int sset) -{ - switch (sset) { - case ETH_SS_STATS: - return ROCKER_PORT_STATS_LEN; - default: - return -EOPNOTSUPP; - } -} - -static const struct ethtool_ops rocker_port_ethtool_ops 
= { - .get_settings = rocker_port_get_settings, - .set_settings = rocker_port_set_settings, - .get_drvinfo = rocker_port_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_strings = rocker_port_get_strings, - .get_ethtool_stats = rocker_port_get_stats, - .get_sset_count = rocker_port_get_sset_count, -}; - -/***************** - * NAPI interface - *****************/ - -static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) -{ - return container_of(napi, struct rocker_port, napi_tx); -} - -static int rocker_port_poll_tx(struct napi_struct *napi, int budget) -{ - struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); - const struct rocker *rocker = rocker_port->rocker; - const struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - /* Cleanup tx descriptors */ - while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { - struct sk_buff *skb; - - err = rocker_desc_err(desc_info); - if (err && net_ratelimit()) - netdev_err(rocker_port->dev, "tx desc received with err %d\n", - err); - rocker_tx_desc_frags_unmap(rocker_port, desc_info); - - skb = rocker_desc_cookie_ptr_get(desc_info); - if (err == 0) { - rocker_port->dev->stats.tx_packets++; - rocker_port->dev->stats.tx_bytes += skb->len; - } else { - rocker_port->dev->stats.tx_errors++; - } - - dev_kfree_skb_any(skb); - credits++; - } - - if (credits && netif_queue_stopped(rocker_port->dev)) - netif_wake_queue(rocker_port->dev); - - napi_complete(napi); - rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); - - return 0; -} - -static int rocker_port_rx_proc(const struct rocker *rocker, - const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info) -{ - const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; - struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); - size_t rx_len; - u16 rx_flags = 0; - - if (!skb) - return -ENOENT; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); - if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) - return -EINVAL; - if (attrs[ROCKER_TLV_RX_FLAGS]) - rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); - - rocker_dma_rx_ring_skb_unmap(rocker, attrs); - - rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); - skb_put(skb, rx_len); - skb->protocol = eth_type_trans(skb, rocker_port->dev); - - if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) - skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; - - rocker_port->dev->stats.rx_packets++; - rocker_port->dev->stats.rx_bytes += skb->len; - - netif_receive_skb(skb); - - return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); -} - -static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) -{ - return container_of(napi, struct rocker_port, napi_rx); -} - -static int rocker_port_poll_rx(struct napi_struct *napi, int budget) -{ - struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); - const struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - u32 credits = 0; - int err; - - /* Process rx descriptors */ - while (credits < budget && - (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { - err = rocker_desc_err(desc_info); - if (err) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "rx desc received with err %d\n", - err); - } else { - err = rocker_port_rx_proc(rocker, rocker_port, - desc_info); - if (err && net_ratelimit()) - netdev_err(rocker_port->dev, "rx processing failed with err %d\n", - err); - } - if (err) - rocker_port->dev->stats.rx_errors++; - - 
rocker_desc_gen_clear(desc_info); - rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); - credits++; - } - - if (credits < budget) - napi_complete(napi); - - rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); - - return credits; -} - -/***************** - * PCI driver ops - *****************/ - -static void rocker_carrier_init(const struct rocker_port *rocker_port) -{ - const struct rocker *rocker = rocker_port->rocker; - u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); - bool link_up; - - link_up = link_status & (1 << rocker_port->pport); - if (link_up) - netif_carrier_on(rocker_port->dev); - else - netif_carrier_off(rocker_port->dev); -} - -static void rocker_remove_ports(const struct rocker *rocker) -{ - struct rocker_port *rocker_port; - int i; - - for (i = 0; i < rocker->port_count; i++) { - rocker_port = rocker->ports[i]; - if (!rocker_port) - continue; - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); - unregister_netdev(rocker_port->dev); - free_netdev(rocker_port->dev); - } - kfree(rocker->ports); -} - -static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) -{ - const struct rocker *rocker = rocker_port->rocker; - const struct pci_dev *pdev = rocker->pdev; - int err; - - err = rocker_cmd_get_port_settings_macaddr(rocker_port, - rocker_port->dev->dev_addr); - if (err) { - dev_warn(&pdev->dev, "failed to get mac address, using random\n"); - eth_hw_addr_random(rocker_port->dev); - } -} - -static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) -{ - const struct pci_dev *pdev = rocker->pdev; - struct rocker_port *rocker_port; - struct net_device *dev; - u16 untagged_vid = 0; - int err; - - dev = alloc_etherdev(sizeof(struct rocker_port)); - if (!dev) - return -ENOMEM; - rocker_port = netdev_priv(dev); - rocker_port->dev = dev; - rocker_port->rocker = rocker; - rocker_port->port_number = port_number; - rocker_port->pport = port_number + 1; - rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; - rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME; - - rocker_port_dev_addr_init(rocker_port); - dev->netdev_ops = &rocker_port_netdev_ops; - dev->ethtool_ops = &rocker_port_ethtool_ops; - dev->switchdev_ops = &rocker_port_switchdev_ops; - netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, - NAPI_POLL_WEIGHT); - netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, - NAPI_POLL_WEIGHT); - rocker_carrier_init(rocker_port); - - dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; - - err = register_netdev(dev); - if (err) { - dev_err(&pdev->dev, "register_netdev failed\n"); - goto err_register_netdev; - } - rocker->ports[port_number] = rocker_port; - - switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); - - rocker_port_set_learning(rocker_port, NULL); - - err = rocker_port_ig_tbl(rocker_port, NULL, 0); - if (err) { - netdev_err(rocker_port->dev, "install ig port table failed\n"); - goto err_port_ig_tbl; - } - - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) { - netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); - goto err_untagged_vlan; - } - - return 0; - -err_untagged_vlan: - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); -err_port_ig_tbl: - rocker->ports[port_number] = NULL; - unregister_netdev(dev); -err_register_netdev: - free_netdev(dev); - return err; -} - -static int rocker_probe_ports(struct rocker 
*rocker) -{ - int i; - size_t alloc_size; - int err; - - alloc_size = sizeof(struct rocker_port *) * rocker->port_count; - rocker->ports = kzalloc(alloc_size, GFP_KERNEL); - if (!rocker->ports) - return -ENOMEM; - for (i = 0; i < rocker->port_count; i++) { - err = rocker_probe_port(rocker, i); - if (err) - goto remove_ports; - } - return 0; - -remove_ports: - rocker_remove_ports(rocker); - return err; -} - -static int rocker_msix_init(struct rocker *rocker) -{ - struct pci_dev *pdev = rocker->pdev; - int msix_entries; - int i; - int err; - - msix_entries = pci_msix_vec_count(pdev); - if (msix_entries < 0) - return msix_entries; - - if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) - return -EINVAL; - - rocker->msix_entries = kmalloc_array(msix_entries, - sizeof(struct msix_entry), - GFP_KERNEL); - if (!rocker->msix_entries) - return -ENOMEM; - - for (i = 0; i < msix_entries; i++) - rocker->msix_entries[i].entry = i; - - err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); - if (err < 0) - goto err_enable_msix; - - return 0; - -err_enable_msix: - kfree(rocker->msix_entries); - return err; -} - -static void rocker_msix_fini(const struct rocker *rocker) -{ - pci_disable_msix(rocker->pdev); - kfree(rocker->msix_entries); -} - -static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) -{ - struct rocker *rocker; - int err; - - rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); - if (!rocker) - return -ENOMEM; - - err = pci_enable_device(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_device failed\n"); - goto err_pci_enable_device; - } - - err = pci_request_regions(pdev, rocker_driver_name); - if (err) { - dev_err(&pdev->dev, "pci_request_regions failed\n"); - goto err_pci_request_regions; - } - - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); - goto err_pci_set_dma_mask; - } - } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); - goto err_pci_set_dma_mask; - } - } - - if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { - dev_err(&pdev->dev, "invalid PCI region size\n"); - err = -EINVAL; - goto err_pci_resource_len_check; - } - - rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); - if (!rocker->hw_addr) { - dev_err(&pdev->dev, "ioremap failed\n"); - err = -EIO; - goto err_ioremap; - } - pci_set_master(pdev); - - rocker->pdev = pdev; - pci_set_drvdata(pdev, rocker); - - rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); - - err = rocker_msix_init(rocker); - if (err) { - dev_err(&pdev->dev, "MSI-X init failed\n"); - goto err_msix_init; - } - - err = rocker_basic_hw_test(rocker); - if (err) { - dev_err(&pdev->dev, "basic hw test failed\n"); - goto err_basic_hw_test; - } - - rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); - - err = rocker_dma_rings_init(rocker); - if (err) - goto err_dma_rings_init; - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), - rocker_cmd_irq_handler, 0, - rocker_driver_name, rocker); - if (err) { - dev_err(&pdev->dev, "cannot assign cmd irq\n"); - goto err_request_cmd_irq; - } - - err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), - rocker_event_irq_handler, 0, - rocker_driver_name, rocker); - if (err) { - dev_err(&pdev->dev, "cannot assign event irq\n"); - goto err_request_event_irq; - } - - rocker->hw.id = 
rocker_read64(rocker, SWITCH_ID); - - err = rocker_init_tbls(rocker); - if (err) { - dev_err(&pdev->dev, "cannot init rocker tables\n"); - goto err_init_tbls; - } - - setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup, - (unsigned long) rocker); - mod_timer(&rocker->fdb_cleanup_timer, jiffies); - - err = rocker_probe_ports(rocker); - if (err) { - dev_err(&pdev->dev, "failed to probe ports\n"); - goto err_probe_ports; - } - - dev_info(&pdev->dev, "Rocker switch with id %*phN\n", - (int)sizeof(rocker->hw.id), &rocker->hw.id); - - return 0; - -err_probe_ports: - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); -err_init_tbls: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); -err_request_event_irq: - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); -err_request_cmd_irq: - rocker_dma_rings_fini(rocker); -err_dma_rings_init: -err_basic_hw_test: - rocker_msix_fini(rocker); -err_msix_init: - iounmap(rocker->hw_addr); -err_ioremap: -err_pci_resource_len_check: -err_pci_set_dma_mask: - pci_release_regions(pdev); -err_pci_request_regions: - pci_disable_device(pdev); -err_pci_enable_device: - kfree(rocker); - return err; -} - -static void rocker_remove(struct pci_dev *pdev) -{ - struct rocker *rocker = pci_get_drvdata(pdev); - - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); - rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); - rocker_remove_ports(rocker); - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); - free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); - rocker_dma_rings_fini(rocker); - rocker_msix_fini(rocker); - iounmap(rocker->hw_addr); - pci_release_regions(rocker->pdev); - pci_disable_device(rocker->pdev); - kfree(rocker); -} - -static struct pci_driver rocker_pci_driver = { - .name = rocker_driver_name, - .id_table = rocker_pci_id_table, - .probe = rocker_probe, - .remove = rocker_remove, -}; - -/************************************ - * Net device notifier event handler - ************************************/ - -static bool rocker_port_dev_check(const struct net_device *dev) -{ - return dev->netdev_ops == &rocker_port_netdev_ops; -} - -static int rocker_port_bridge_join(struct rocker_port *rocker_port, - struct net_device *bridge) -{ - u16 untagged_vid = 0; - int err; - - /* Port is joining bridge, so the internal VLAN for the - * port is going to change to the bridge internal VLAN. - * Let's remove untagged VLAN (vid=0) from port and - * re-add once internal VLAN has changed. 
- */ - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - - rocker_port->bridge_dev = bridge; - switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); - - return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); -} - -static int rocker_port_bridge_leave(struct rocker_port *rocker_port) -{ - u16 untagged_vid = 0; - int err; - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->bridge_dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, - rocker_port->dev->ifindex); - - switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, - false); - rocker_port->bridge_dev = NULL; - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) - return err; - - if (rocker_port->dev->flags & IFF_UP) - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - - -static int rocker_port_ovs_changed(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err; - - rocker_port->bridge_dev = master; - - err = rocker_port_fwd_disable(rocker_port, NULL, 0); - if (err) - return err; - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - -static int rocker_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err = 0; - - if (netif_is_bridge_master(master)) - err = rocker_port_bridge_join(rocker_port, master); - else if (netif_is_ovs_master(master)) - err = rocker_port_ovs_changed(rocker_port, master); - return err; -} - -static int rocker_port_master_unlinked(struct rocker_port *rocker_port) -{ - int err = 0; - - if (rocker_port_is_bridged(rocker_port)) - err = rocker_port_bridge_leave(rocker_port); - else if (rocker_port_is_ovsed(rocker_port)) - err = rocker_port_ovs_changed(rocker_port, NULL); - return err; -} - -static int rocker_netdevice_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev = netdev_notifier_info_to_dev(ptr); - struct netdev_notifier_changeupper_info *info; - struct rocker_port *rocker_port; - int err; - - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - - switch (event) { - case NETDEV_CHANGEUPPER: - info = ptr; - if (!info->master) - goto out; - rocker_port = netdev_priv(dev); - if (info->linking) { - err = rocker_port_master_linked(rocker_port, - info->upper_dev); - if (err) - netdev_warn(dev, "failed to reflect master linked (err %d)\n", - err); - } else { - err = rocker_port_master_unlinked(rocker_port); - if (err) - netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", - err); - } - break; - } -out: - return NOTIFY_DONE; -} - -static struct notifier_block rocker_netdevice_nb __read_mostly = { - .notifier_call = rocker_netdevice_event, -}; - -/************************************ - * Net event notifier event handler - ************************************/ - -static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int flags = (n->nud_state & NUD_VALID ? 
0 : ROCKER_OP_FLAG_REMOVE) | - ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha); -} - -static int rocker_netevent_event(struct notifier_block *unused, - unsigned long event, void *ptr) -{ - struct net_device *dev; - struct neighbour *n = ptr; - int err; - - switch (event) { - case NETEVENT_NEIGH_UPDATE: - if (n->tbl != &arp_tbl) - return NOTIFY_DONE; - dev = n->dev; - if (!rocker_port_dev_check(dev)) - return NOTIFY_DONE; - err = rocker_neigh_update(dev, n); - if (err) - netdev_warn(dev, - "failed to handle neigh update (err %d)\n", - err); - break; - } - - return NOTIFY_DONE; -} - -static struct notifier_block rocker_netevent_nb __read_mostly = { - .notifier_call = rocker_netevent_event, -}; - -/*********************** - * Module init and exit - ***********************/ - -static int __init rocker_module_init(void) -{ - int err; - - register_netdevice_notifier(&rocker_netdevice_nb); - register_netevent_notifier(&rocker_netevent_nb); - err = pci_register_driver(&rocker_pci_driver); - if (err) - goto err_pci_register_driver; - return 0; - -err_pci_register_driver: - unregister_netevent_notifier(&rocker_netevent_nb); - unregister_netdevice_notifier(&rocker_netdevice_nb); - return err; -} - -static void __exit rocker_module_exit(void) -{ - unregister_netevent_notifier(&rocker_netevent_nb); - unregister_netdevice_notifier(&rocker_netdevice_nb); - pci_unregister_driver(&rocker_pci_driver); -} - -module_init(rocker_module_init); -module_exit(rocker_module_exit); - -MODULE_LICENSE("GPL v2"); -MODULE_AUTHOR("Jiri Pirko "); -MODULE_AUTHOR("Scott Feldman "); -MODULE_DESCRIPTION("Rocker switch device driver"); -MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c new file mode 100644 index 000000000000..44fa9ac36d85 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -0,0 +1,5493 @@ +/* + * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rocker_hw.h" + +static const char rocker_driver_name[] = "rocker"; + +static const struct pci_device_id rocker_pci_id_table[] = { + {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0}, + {0, } +}; + +struct rocker_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_pport; + u32 in_pport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_pport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_pport; + u32 in_pport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_pport; + u32 in_pport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct rocker_flow_tbl_entry { + struct hlist_node entry; + u32 cmd; + u64 cookie; + struct rocker_flow_tbl_key key; + size_t key_len; + u32 key_crc32; /* key */ +}; + +struct rocker_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct rocker_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + unsigned long touched; + struct rocker_fdb_tbl_key { + struct rocker_port *rocker_port; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct rocker_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + __be16 vlan_id; +}; + +struct rocker_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool ttl_check; +}; + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + dma_addr_t mapaddr; +}; + +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +enum { + ROCKER_CTRL_LINK_LOCAL_MCAST, + ROCKER_CTRL_LOCAL_ARP, + ROCKER_CTRL_IPV4_MCAST, + ROCKER_CTRL_IPV6_MCAST, + ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_DFLT_OVS, + ROCKER_CTRL_MAX, +}; + +#define 
ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 +#define ROCKER_N_INTERNAL_VLANS 255 +#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) + +struct rocker_port { + struct net_device *dev; + struct net_device *bridge_dev; + struct rocker *rocker; + unsigned int port_number; + u32 pport; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + unsigned long ageing_time; + bool ctrls[ROCKER_CTRL_MAX]; + unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; /* for cmd ring accesses */ + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; /* for flow tbl accesses */ + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; /* for group tbl accesses */ + struct timer_list fdb_cleanup_timer; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ + unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ + u32 neigh_tbl_next_index; +}; + +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
+ */ + +enum { + ROCKER_PRIORITY_UNKNOWN = 0, + ROCKER_PRIORITY_IG_PORT = 1, + ROCKER_PRIORITY_VLAN = 1, + ROCKER_PRIORITY_TERM_MAC_UCAST = 0, + ROCKER_PRIORITY_TERM_MAC_MCAST = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_VLAN = 3, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + ROCKER_PRIORITY_BRIDGING_TENANT = 3, + ROCKER_PRIORITY_ACL_CTRL = 3, + ROCKER_PRIORITY_ACL_NORMAL = 2, + ROCKER_PRIORITY_ACL_DFLT = 1, +}; + +static bool rocker_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = rocker_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, + __be16 vlan_id) +{ + if (rocker_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) +{ + return rocker_port->bridge_dev && + netif_is_bridge_master(rocker_port->bridge_dev); +} + +static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) +{ + return rocker_port->bridge_dev && + netif_is_ovs_master(rocker_port->bridge_dev); +} + +#define ROCKER_OP_FLAG_REMOVE BIT(0) +#define ROCKER_OP_FLAG_NOWAIT BIT(1) +#define ROCKER_OP_FLAG_LEARNED BIT(2) +#define ROCKER_OP_FLAG_REFRESH BIT(3) + +static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + struct switchdev_trans_item *elem = NULL; + gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ? + GFP_ATOMIC : GFP_KERNEL; + + /* If in transaction prepare phase, allocate the memory + * and enqueue it on a transaction. If in transaction + * commit phase, dequeue the memory from the transaction + * rather than re-allocating the memory. The idea is the + * driver code paths for prepare and commit are identical + * so the memory allocated in the prepare phase is the + * memory used in the commit phase. + */ + + if (!trans) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + } else if (switchdev_trans_ph_prepare(trans)) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + if (!elem) + return NULL; + switchdev_trans_item_enqueue(trans, elem, kfree, elem); + } else { + elem = switchdev_trans_item_dequeue(trans); + } + + return elem ? elem + 1 : NULL; +} + +static void *rocker_kzalloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + return __rocker_mem_alloc(trans, flags, size); +} + +static void *rocker_kcalloc(struct switchdev_trans *trans, int flags, + size_t n, size_t size) +{ + return __rocker_mem_alloc(trans, flags, n * size); +} + +static void rocker_kfree(struct switchdev_trans *trans, const void *mem) +{ + struct switchdev_trans_item *elem; + + /* Frees are ignored if in transaction prepare phase. The + * memory remains on the per-port list until freed in the + * commit phase. 
+ */ + + if (switchdev_trans_ph_prepare(trans)) + return; + + elem = (struct switchdev_trans_item *) mem - 1; + kfree(elem); +} + +struct rocker_wait { + wait_queue_head_t wait; + bool done; + bool nowait; +}; + +static void rocker_wait_reset(struct rocker_wait *wait) +{ + wait->done = false; + wait->nowait = false; +} + +static void rocker_wait_init(struct rocker_wait *wait) +{ + init_waitqueue_head(&wait->wait); + rocker_wait_reset(wait); +} + +static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + int flags) +{ + struct rocker_wait *wait; + + wait = rocker_kzalloc(trans, flags, sizeof(*wait)); + if (!wait) + return NULL; + rocker_wait_init(wait); + return wait; +} + +static void rocker_wait_destroy(struct switchdev_trans *trans, + struct rocker_wait *wait) +{ + rocker_kfree(trans, wait); +} + +static bool rocker_wait_event_timeout(struct rocker_wait *wait, + unsigned long timeout) +{ + wait_event_timeout(wait->wait, wait->done, HZ / 10); + if (!wait->done) + return false; + return true; +} + +static void rocker_wait_wake_up(struct rocker_wait *wait) +{ + wait->done = true; + wake_up(&wait->wait); +} + +static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector) +{ + return rocker->msix_entries[vector].vector; +} + +static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_TX(rocker_port->port_number)); +} + +static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port) +{ + return rocker_msix_vector(rocker_port->rocker, + ROCKER_MSIX_VEC_RX(rocker_port->port_number)); +} + +#define rocker_write32(rocker, reg, val) \ + writel((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read32(rocker, reg) \ + readl((rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_write64(rocker, reg, val) \ + writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg)) +#define rocker_read64(rocker, reg) \ + readq((rocker)->hw_addr + (ROCKER_ ## reg)) + +/***************************** + * HW basic testing functions + *****************************/ + +static int rocker_reg_test(const struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + u64 test_reg; + u64 rnd; + + rnd = prandom_u32(); + rnd >>= 1; + rocker_write32(rocker, TEST_REG, rnd); + test_reg = rocker_read32(rocker, TEST_REG); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n", + test_reg, rnd * 2); + return -EIO; + } + + rnd = prandom_u32(); + rnd <<= 31; + rnd |= prandom_u32(); + rocker_write64(rocker, TEST_REG64, rnd); + test_reg = rocker_read64(rocker, TEST_REG64); + if (test_reg != rnd * 2) { + dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n", + test_reg, rnd * 2); + return -EIO; + } + + return 0; +} + +static int rocker_dma_test_one(const struct rocker *rocker, + struct rocker_wait *wait, u32 test_type, + dma_addr_t dma_handle, const unsigned char *buf, + const unsigned char *expect, size_t size) +{ + const struct pci_dev *pdev = rocker->pdev; + int i; + + rocker_wait_reset(wait); + rocker_write32(rocker, TEST_DMA_CTRL, test_type); + + if (!rocker_wait_event_timeout(wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + return -EIO; + } + + for (i = 0; i < size; i++) { + if (buf[i] != expect[i]) { + dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected", + buf[i], i, expect[i]); + return -EIO; + } + } + return 
0; +} + +#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4) +#define ROCKER_TEST_DMA_FILL_PATTERN 0x96 + +static int rocker_dma_test_offset(const struct rocker *rocker, + struct rocker_wait *wait, int offset) +{ + struct pci_dev *pdev = rocker->pdev; + unsigned char *alloc; + unsigned char *buf; + unsigned char *expect; + dma_addr_t dma_handle; + int i; + int err; + + alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset, + GFP_KERNEL | GFP_DMA); + if (!alloc) + return -ENOMEM; + buf = alloc + offset; + expect = buf + ROCKER_TEST_DMA_BUF_SIZE; + + dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(pdev, dma_handle)) { + err = -EIO; + goto free_alloc; + } + + rocker_write64(rocker, TEST_DMA_ADDR, dma_handle); + rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE); + + memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE); + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + + prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE); + for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++) + expect[i] = ~buf[i]; + err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT, + dma_handle, buf, expect, + ROCKER_TEST_DMA_BUF_SIZE); + if (err) + goto unmap; + +unmap: + pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE, + PCI_DMA_BIDIRECTIONAL); +free_alloc: + kfree(alloc); + + return err; +} + +static int rocker_dma_test(const struct rocker *rocker, + struct rocker_wait *wait) +{ + int i; + int err; + + for (i = 0; i < 8; i++) { + err = rocker_dma_test_offset(rocker, wait, i); + if (err) + return err; + } + return 0; +} + +static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id) +{ + struct rocker_wait *wait = dev_id; + + rocker_wait_wake_up(wait); + + return IRQ_HANDLED; +} + +static int rocker_basic_hw_test(const struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + struct rocker_wait wait; + int err; + + err = rocker_reg_test(rocker); + if (err) { + dev_err(&pdev->dev, "reg test failed\n"); + return err; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), + rocker_test_irq_handler, 0, + rocker_driver_name, &wait); + if (err) { + dev_err(&pdev->dev, "cannot assign test irq\n"); + return err; + } + + rocker_wait_init(&wait); + rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST); + + if (!rocker_wait_event_timeout(&wait, HZ / 10)) { + dev_err(&pdev->dev, "no interrupt received within a timeout\n"); + err = -EIO; + goto free_irq; + } + + err = rocker_dma_test(rocker, &wait); + if (err) + dev_err(&pdev->dev, "dma test failed\n"); + +free_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait); + return err; +} + +/****** + * TLV + ******/ + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ 
+ * <--------------------------- tlv->len --------------------------> + */ + +static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static int rocker_tlv_type(const struct rocker_tlv *tlv) +{ + return tlv->type; +} + +static void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = tlv; + } +} + +static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, + const struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +static int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} + 
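rocker_tlv_put() and the nesting helpers that follow are used to build every command descriptor: a typical message is one top-level CMD_TYPE attribute plus a nested CMD_INFO attribute carrying the command-specific fields, as in the port-stats request removed earlier in this patch. A minimal sketch of that encoding, assuming the descriptor has already been taken from the cmd ring; the function name example_fill_get_stats_cmd is hypothetical and not part of this patch:

static int example_fill_get_stats_cmd(struct rocker_desc_info *desc_info,
				      u32 pport)
{
	struct rocker_tlv *cmd_info;

	/* Top-level attribute: which command this descriptor carries. */
	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
		return -EMSGSIZE;

	/* Nested attribute holding the command-specific fields. */
	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
	if (!cmd_info)
		return -EMSGSIZE;

	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
			       pport)) {
		rocker_tlv_nest_cancel(desc_info, cmd_info);
		return -EMSGSIZE;
	}

	/* Fix up the nest header length now that the payload is known. */
	rocker_tlv_nest_end(desc_info, cmd_info);
	return 0;
}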
+static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + const struct rocker_tlv *start) +{ + desc_info->tlv_size = (const char *) start - desc_info->data; +} + +/****************************************** + * DMA rings and descriptors manipulations + ******************************************/ + +static u32 __pos_inc(u32 pos, size_t limit) +{ + return ++pos == limit ? 0 : pos; +} + +static int rocker_desc_err(const struct rocker_desc_info *desc_info) +{ + int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN; + + switch (err) { + case ROCKER_OK: + return 0; + case -ROCKER_ENOENT: + return -ENOENT; + case -ROCKER_ENXIO: + return -ENXIO; + case -ROCKER_ENOMEM: + return -ENOMEM; + case -ROCKER_EEXIST: + return -EEXIST; + case -ROCKER_EINVAL: + return -EINVAL; + case -ROCKER_EMSGSIZE: + return -EMSGSIZE; + case -ROCKER_ENOTSUP: + return -EOPNOTSUPP; + case -ROCKER_ENOBUFS: + return -ENOBUFS; + } + + return -EINVAL; +} + +static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info) +{ + desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN; +} + +static bool rocker_desc_gen(const struct rocker_desc_info *desc_info) +{ + u32 comp_err = desc_info->desc->comp_err; + + return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? 
true : false; +} + +static void * +rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info) +{ + return (void *)(uintptr_t)desc_info->desc->cookie; +} + +static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info, + void *ptr) +{ + desc_info->desc->cookie = (uintptr_t) ptr; +} + +static struct rocker_desc_info * +rocker_desc_head_get(const struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + u32 head = __pos_inc(info->head, info->size); + + desc_info = &info->desc_info[info->head]; + if (head == info->tail) + return NULL; /* ring full */ + desc_info->tlv_size = 0; + return desc_info; +} + +static void rocker_desc_commit(const struct rocker_desc_info *desc_info) +{ + desc_info->desc->buf_size = desc_info->data_size; + desc_info->desc->tlv_size = desc_info->tlv_size; +} + +static void rocker_desc_head_set(const struct rocker *rocker, + struct rocker_dma_ring_info *info, + const struct rocker_desc_info *desc_info) +{ + u32 head = __pos_inc(info->head, info->size); + + BUG_ON(head == info->tail); + rocker_desc_commit(desc_info); + info->head = head; + rocker_write32(rocker, DMA_DESC_HEAD(info->type), head); +} + +static struct rocker_desc_info * +rocker_desc_tail_get(struct rocker_dma_ring_info *info) +{ + static struct rocker_desc_info *desc_info; + + if (info->tail == info->head) + return NULL; /* nothing to be done between head and tail */ + desc_info = &info->desc_info[info->tail]; + if (!rocker_desc_gen(desc_info)) + return NULL; /* gen bit not set, desc is not ready yet */ + info->tail = __pos_inc(info->tail, info->size); + desc_info->tlv_size = desc_info->desc->tlv_size; + return desc_info; +} + +static void rocker_dma_ring_credits_set(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + u32 credits) +{ + if (credits) + rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits); +} + +static unsigned long rocker_dma_ring_size_fix(size_t size) +{ + return max(ROCKER_DMA_SIZE_MIN, + min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX)); +} + +static int rocker_dma_ring_create(const struct rocker *rocker, + unsigned int type, + size_t size, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(size != rocker_dma_ring_size_fix(size)); + info->size = size; + info->type = type; + info->head = 0; + info->tail = 0; + info->desc_info = kcalloc(info->size, sizeof(*info->desc_info), + GFP_KERNEL); + if (!info->desc_info) + return -ENOMEM; + + info->desc = pci_alloc_consistent(rocker->pdev, + info->size * sizeof(*info->desc), + &info->mapaddr); + if (!info->desc) { + kfree(info->desc_info); + return -ENOMEM; + } + + for (i = 0; i < info->size; i++) + info->desc_info[i].desc = &info->desc[i]; + + rocker_write32(rocker, DMA_DESC_CTRL(info->type), + ROCKER_DMA_DESC_CTRL_RESET); + rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr); + rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size); + + return 0; +} + +static void rocker_dma_ring_destroy(const struct rocker *rocker, + const struct rocker_dma_ring_info *info) +{ + rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0); + + pci_free_consistent(rocker->pdev, + info->size * sizeof(struct rocker_desc), + info->desc, info->mapaddr); + kfree(info->desc_info); +} + +static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker, + struct rocker_dma_ring_info *info) +{ + int i; + + BUG_ON(info->head || info->tail); + + /* When ring is consumer, we need to advance head for each desc. 
+ * That tells hw that the desc is ready to be used by it. + */ + for (i = 0; i < info->size - 1; i++) + rocker_desc_head_set(rocker, info, &info->desc_info[i]); + rocker_desc_commit(&info->desc_info[i]); +} + +static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + int direction, size_t buf_size) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + int err; + + for (i = 0; i < info->size; i++) { + struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + dma_addr_t dma_handle; + char *buf; + + buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA); + if (!buf) { + err = -ENOMEM; + goto rollback; + } + + dma_handle = pci_map_single(pdev, buf, buf_size, direction); + if (pci_dma_mapping_error(pdev, dma_handle)) { + kfree(buf); + err = -EIO; + goto rollback; + } + + desc_info->data = buf; + desc_info->data_size = buf_size; + dma_unmap_addr_set(desc_info, mapaddr, dma_handle); + + desc->buf_addr = dma_handle; + desc->buf_size = buf_size; + } + return 0; + +rollback: + for (i--; i >= 0; i--) { + const struct rocker_desc_info *desc_info = &info->desc_info[i]; + + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } + return err; +} + +static void rocker_dma_ring_bufs_free(const struct rocker *rocker, + const struct rocker_dma_ring_info *info, + int direction) +{ + struct pci_dev *pdev = rocker->pdev; + int i; + + for (i = 0; i < info->size; i++) { + const struct rocker_desc_info *desc_info = &info->desc_info[i]; + struct rocker_desc *desc = &info->desc[i]; + + desc->buf_addr = 0; + desc->buf_size = 0; + pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr), + desc_info->data_size, direction); + kfree(desc_info->data); + } +} + +static int rocker_dma_rings_init(struct rocker *rocker) +{ + const struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD, + ROCKER_DMA_CMD_DEFAULT_SIZE, + &rocker->cmd_ring); + if (err) { + dev_err(&pdev->dev, "failed to create command dma ring\n"); + return err; + } + + spin_lock_init(&rocker->cmd_ring_lock); + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n"); + goto err_dma_cmd_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, + ROCKER_DMA_EVENT_DEFAULT_SIZE, + &rocker->event_ring); + if (err) { + dev_err(&pdev->dev, "failed to create event dma ring\n"); + goto err_dma_event_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring, + PCI_DMA_FROMDEVICE, PAGE_SIZE); + if (err) { + dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n"); + goto err_dma_event_ring_bufs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring); + return 0; + +err_dma_event_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->event_ring); +err_dma_event_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); + return err; +} + +static void rocker_dma_rings_fini(struct rocker *rocker) +{ + rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, 
&rocker->cmd_ring); +} + +static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + struct sk_buff *skb, size_t buf_len) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + + dma_handle = pci_map_single(pdev, skb->data, buf_len, + PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(pdev, dma_handle)) + return -EIO; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle)) + goto tlv_put_failure; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len)) + goto tlv_put_failure; + return 0; + +tlv_put_failure: + pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE); + desc_info->tlv_size = 0; + return -EMSGSIZE; +} + +static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port) +{ + return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; +} + +static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + struct net_device *dev = rocker_port->dev; + struct sk_buff *skb; + size_t buf_len = rocker_port_rx_buf_len(rocker_port); + int err; + + /* Ensure that hw will see tlv_size zero in case of an error. + * That tells hw to use another descriptor. + */ + rocker_desc_cookie_ptr_set(desc_info, NULL); + desc_info->tlv_size = 0; + + skb = netdev_alloc_skb_ip_align(dev, buf_len); + if (!skb) + return -ENOMEM; + err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len); + if (err) { + dev_kfree_skb_any(skb); + return err; + } + rocker_desc_cookie_ptr_set(desc_info, skb); + return 0; +} + +static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker, + const struct rocker_tlv **attrs) +{ + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + size_t len; + + if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] || + !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]) + return; + dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]); + len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]); + pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE); +} + +static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker, + const struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + + if (!skb) + return; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + dev_kfree_skb_any(skb); +} + +static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port) +{ + const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; + int i; + int err; + + for (i = 0; i < rx_ring->size; i++) { + err = rocker_dma_rx_ring_skb_alloc(rocker_port, + &rx_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); + return err; +} + +static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port) +{ + const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring; + const struct rocker *rocker = rocker_port->rocker; + int i; + + for (i = 0; i < rx_ring->size; i++) + rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]); +} + +static int rocker_port_dma_rings_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + int err; + + err = rocker_dma_ring_create(rocker, + 
ROCKER_DMA_TX(rocker_port->port_number), + ROCKER_DMA_TX_DEFAULT_SIZE, + &rocker_port->tx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create tx dma ring\n"); + return err; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE, + ROCKER_DMA_TX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n"); + goto err_dma_tx_ring_bufs_alloc; + } + + err = rocker_dma_ring_create(rocker, + ROCKER_DMA_RX(rocker_port->port_number), + ROCKER_DMA_RX_DEFAULT_SIZE, + &rocker_port->rx_ring); + if (err) { + netdev_err(rocker_port->dev, "failed to create rx dma ring\n"); + goto err_dma_rx_ring_create; + } + + err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL, + ROCKER_DMA_RX_DESC_SIZE); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n"); + goto err_dma_rx_ring_bufs_alloc; + } + + err = rocker_dma_rx_ring_skbs_alloc(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n"); + goto err_dma_rx_ring_skbs_alloc; + } + rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring); + + return 0; + +err_dma_rx_ring_skbs_alloc: + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); +err_dma_rx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); +err_dma_rx_ring_create: + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); +err_dma_tx_ring_bufs_alloc: + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); + return err; +} + +static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + + rocker_dma_rx_ring_skbs_free(rocker_port); + rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring, + PCI_DMA_BIDIRECTIONAL); + rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring); + rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring, + PCI_DMA_TODEVICE); + rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring); +} + +static void rocker_port_set_enable(const struct rocker_port *rocker_port, + bool enable) +{ + u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); + + if (enable) + val |= 1ULL << rocker_port->pport; + else + val &= ~(1ULL << rocker_port->pport); + rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); +} + +/******************************** + * Interrupt handler and helpers + ********************************/ + +static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + const struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + u32 credits = 0; + + spin_lock(&rocker->cmd_ring_lock); + while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) { + wait = rocker_desc_cookie_ptr_get(desc_info); + if (wait->nowait) { + rocker_desc_gen_clear(desc_info); + rocker_wait_destroy(NULL, wait); + } else { + rocker_wait_wake_up(wait); + } + credits++; + } + spin_unlock(&rocker->cmd_ring_lock); + rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits); + + return IRQ_HANDLED; +} + +static void rocker_port_link_up(const struct rocker_port *rocker_port) +{ + netif_carrier_on(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is up\n"); +} + +static void rocker_port_link_down(const struct rocker_port *rocker_port) +{ + netif_carrier_off(rocker_port->dev); + netdev_info(rocker_port->dev, "Link is down\n"); +} + +static int rocker_event_link_change(const struct rocker *rocker, + const struct 
rocker_tlv *info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1]; + unsigned int port_number; + bool link_up; + struct rocker_port *rocker_port; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] || + !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1; + link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + if (netif_carrier_ok(rocker_port->dev) != link_up) { + if (link_up) + rocker_port_link_up(rocker_port); + else + rocker_port_link_down(rocker_port); + } + + return 0; +} + +static int rocker_port_fdb(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + const unsigned char *addr, + __be16 vlan_id, int flags); + +static int rocker_event_mac_vlan_seen(const struct rocker *rocker, + const struct rocker_tlv *info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1]; + unsigned int port_number; + struct rocker_port *rocker_port; + const unsigned char *addr; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; + __be16 vlan_id; + + rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); + if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] || + !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]) + return -EIO; + port_number = + rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1; + addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]); + vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]); + + if (port_number >= rocker->port_count) + return -EINVAL; + + rocker_port = rocker->ports[port_number]; + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags); +} + +static int rocker_event_process(const struct rocker *rocker, + const struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1]; + const struct rocker_tlv *info; + u16 type; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info); + if (!attrs[ROCKER_TLV_EVENT_TYPE] || + !attrs[ROCKER_TLV_EVENT_INFO]) + return -EIO; + + type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]); + info = attrs[ROCKER_TLV_EVENT_INFO]; + + switch (type) { + case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED: + return rocker_event_link_change(rocker, info); + case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN: + return rocker_event_mac_vlan_seen(rocker, info); + } + + return -EOPNOTSUPP; +} + +static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id) +{ + struct rocker *rocker = dev_id; + const struct pci_dev *pdev = rocker->pdev; + const struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + dev_err(&pdev->dev, "event desc received with err %d\n", + err); + } else { + err = rocker_event_process(rocker, desc_info); + if (err) + dev_err(&pdev->dev, "event processing failed with err %d\n", + err); + } + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker->event_ring, desc_info); + credits++; + } + rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits); + + return IRQ_HANDLED; +} + +static irqreturn_t 
rocker_tx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_tx); + return IRQ_HANDLED; +} + +static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) +{ + struct rocker_port *rocker_port = dev_id; + + napi_schedule(&rocker_port->napi_rx); + return IRQ_HANDLED; +} + +/******************** + * Command interface + ********************/ + +typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv); + +static int rocker_cmd_exec(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_wait *wait; + bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT); + unsigned long lock_flags; + int err; + + wait = rocker_wait_create(rocker_port, trans, flags); + if (!wait) + return -ENOMEM; + wait->nowait = nowait; + + spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); + + desc_info = rocker_desc_head_get(&rocker->cmd_ring); + if (!desc_info) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + err = -EAGAIN; + goto out; + } + + err = prepare(rocker_port, desc_info, prepare_priv); + if (err) { + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + goto out; + } + + rocker_desc_cookie_ptr_set(desc_info, wait); + + if (!switchdev_trans_ph_prepare(trans)) + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + + spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); + + if (nowait) + return 0; + + if (!switchdev_trans_ph_prepare(trans)) + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; + + err = rocker_desc_err(desc_info); + if (err) + return err; + + if (process) + err = process(rocker_port, desc_info, process_priv); + + rocker_desc_gen_clear(desc_info); +out: + rocker_wait_destroy(trans, wait); + return err; +} + +static int +rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + u32 speed; + u8 duplex; + u8 autoneg; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] || + !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) + return -EIO; + + speed = 
rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]); + duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]); + autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]); + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = SUPPORTED_TP; + ecmd->phy_address = 0xff; + ecmd->port = PORT_TP; + ethtool_cmd_speed_set(ecmd, speed); + ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF; + ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + return 0; +} + +static int +rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + unsigned char *macaddr = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]; + if (!attr) + return -EIO; + + if (rocker_tlv_len(attr) != ETH_ALEN) + return -EINVAL; + + ether_addr_copy(macaddr, rocker_tlv_data(attr)); + return 0; +} + +struct port_name { + char *buf; + size_t len; +}; + +static int +rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + struct port_name *name = priv; + const struct rocker_tlv *attr; + size_t i, j, len; + const char *str; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME]; + if (!attr) + return -EIO; + + len = min_t(size_t, rocker_tlv_len(attr), name->len); + str = rocker_tlv_data(attr); + + /* make sure name only contains alphanumeric characters */ + for (i = j = 0; i < len; ++i) { + if (isalnum(str[i])) { + name->buf[j] = str[i]; + j++; + } + } + + if (j == 0) + return -EIO; + + name->buf[j] = '\0'; + + return 0; +} + +static int +rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ethtool_cmd *ecmd = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, + ethtool_cmd_speed(ecmd))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, + ecmd->duplex)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, + ecmd->autoneg)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const unsigned char *macaddr = priv; + 
struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR, + ETH_ALEN, macaddr)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + int mtu = *(int *)priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU, + mtu)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int +rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, + !!(rocker_port->brport_flags & BR_LEARNING))) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + return 0; +} + +static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_ethtool_proc, + ecmd); +} + +static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_macaddr_proc, + macaddr); +} + +static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, + struct ethtool_cmd *ecmd) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_set_port_settings_ethtool_prep, + ecmd, NULL, NULL); +} + +static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, + unsigned char *macaddr) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_set_port_settings_macaddr_prep, + macaddr, NULL, NULL); +} + +static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, + int mtu) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_set_port_settings_mtu_prep, + &mtu, NULL, NULL); +} + +static int rocker_port_set_learning(struct rocker_port *rocker_port, + struct switchdev_trans *trans) +{ + return rocker_cmd_exec(rocker_port, trans, 0, + rocker_cmd_set_port_learning_prep, + NULL, NULL, NULL); +} + +static int +rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + const struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + const struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + const struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + const struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + const struct rocker_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, 
+ ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + const struct rocker_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + const struct rocker_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + const struct rocker_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + const struct 
rocker_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ + +static int rocker_init_tbls(struct rocker *rocker) +{ + hash_init(rocker->flow_tbl); + spin_lock_init(&rocker->flow_tbl_lock); + + hash_init(rocker->group_tbl); + spin_lock_init(&rocker->group_tbl_lock); + + hash_init(rocker->fdb_tbl); + spin_lock_init(&rocker->fdb_tbl_lock); + + hash_init(rocker->internal_vlan_tbl); + spin_lock_init(&rocker->internal_vlan_tbl_lock); + + hash_init(rocker->neigh_tbl); + spin_lock_init(&rocker->neigh_tbl_lock); + + return 0; +} + +static void rocker_free_tbls(struct rocker *rocker) +{ + unsigned long flags; + struct rocker_flow_tbl_entry *flow_entry; + struct rocker_group_tbl_entry *group_entry; + struct rocker_fdb_tbl_entry *fdb_entry; + struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; + struct rocker_neigh_tbl_entry *neigh_entry; + struct hlist_node *tmp; + 
int bkt; + + spin_lock_irqsave(&rocker->flow_tbl_lock, flags); + hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); + + spin_lock_irqsave(&rocker->group_tbl_lock, flags); + hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); + hash_for_each_safe(rocker->internal_vlan_tbl, bkt, + tmp, internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); + hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); +} + +static struct rocker_flow_tbl_entry * +rocker_flow_tbl_find(const struct rocker *rocker, + const struct rocker_flow_tbl_entry *match) +{ + struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + + hash_for_each_possible(rocker->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, key_len) == 0) + return found; + } + + return NULL; +} + +static int rocker_flow_tbl_add(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + struct rocker_flow_tbl_entry *match) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + match->cookie = found->cookie; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + rocker_kfree(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; + } else { + found = match; + found->cookie = rocker->flow_tbl_next_cookie++; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); + + return rocker_cmd_exec(rocker_port, trans, flags, + rocker_cmd_flow_tbl_add, found, NULL, NULL); +} + +static int rocker_flow_tbl_del(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + struct rocker_flow_tbl_entry *match) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_flow_tbl_entry *found; + size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); + unsigned long lock_flags; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); + + found = rocker_flow_tbl_find(rocker, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; + } + + spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); + + rocker_kfree(trans, match); + + if (found) { + err = rocker_cmd_exec(rocker_port, trans, flags, + rocker_cmd_flow_tbl_del, + found, NULL, NULL); + rocker_kfree(trans, found); + } + + return err; +} + +static int rocker_flow_tbl_do(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + struct rocker_flow_tbl_entry *entry) +{ + if (flags & ROCKER_OP_FLAG_REMOVE) + return rocker_flow_tbl_del(rocker_port, trans, flags, entry); + else + return rocker_flow_tbl_add(rocker_port, trans, flags, entry); +} + +static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct rocker_flow_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return rocker_flow_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, __be16 vlan_id, + __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, __be16 new_vlan_id) +{ + struct rocker_flow_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = ROCKER_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_pport = in_pport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return rocker_flow_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + u32 in_pport, u32 in_pport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; 
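+	/* copy_to_cpu is kept in the key and, when set, is emitted as
+	 * ROCKER_TLV_OF_DPA_COPY_CPU_ACTION by rocker_cmd_flow_tbl_add_term_mac(),
+	 * asking the device to also deliver matching frames to the CPU.
+	 */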
+ entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct rocker_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (!ether_addr_equal(eth_dst_mask, ff_mac)) + wild = true; + } + + priority = ROCKER_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = ROCKER_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return rocker_flow_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct rocker_flow_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct rocker_flow_tbl_key, + ucast_routing.group_id); + + return rocker_flow_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, __be16 vlan_id, + __be16 vlan_id_mask, u8 ip_proto, + u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct rocker_flow_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + priority = ROCKER_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (ether_addr_equal(eth_dst_mask, mcast_mac)) + priority = ROCKER_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = ROCKER_PRIORITY_ACL_CTRL; + } + + 
entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = group_id; + + return rocker_flow_tbl_do(rocker_port, trans, flags, entry); +} + +static struct rocker_group_tbl_entry * +rocker_group_tbl_find(const struct rocker *rocker, + const struct rocker_group_tbl_entry *match) +{ + struct rocker_group_tbl_entry *found; + + hash_for_each_possible(rocker->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void rocker_group_tbl_entry_free(struct switchdev_trans *trans, + struct rocker_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + rocker_kfree(trans, entry->group_ids); + break; + default: + break; + } + rocker_kfree(trans, entry); +} + +static int rocker_group_tbl_add(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + struct rocker_group_tbl_entry *match) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long lock_flags; + + spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + rocker_group_tbl_entry_free(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(rocker->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); + + return rocker_cmd_exec(rocker_port, trans, flags, + rocker_cmd_group_tbl_add, found, NULL, NULL); +} + +static int rocker_group_tbl_del(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + struct rocker_group_tbl_entry *match) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_group_tbl_entry *found; + unsigned long lock_flags; + int err = 0; + + spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); + + found = rocker_group_tbl_find(rocker, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); + + rocker_group_tbl_entry_free(trans, match); + + if (found) { + err = rocker_cmd_exec(rocker_port, trans, flags, + rocker_cmd_group_tbl_del, + found, NULL, NULL); + rocker_group_tbl_entry_free(trans, found); + } + + return err; +} + +static int rocker_group_tbl_do(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + struct rocker_group_tbl_entry *entry) +{ + if (flags & ROCKER_OP_FLAG_REMOVE) + return 
rocker_group_tbl_del(rocker_port, trans, flags, entry); + else + return rocker_group_tbl_add(rocker_port, trans, flags, entry); +} + +static int rocker_group_l2_interface(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u32 out_pport, + int pop_vlan) +{ + struct rocker_group_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + entry->l2_interface.pop_vlan = pop_vlan; + + return rocker_group_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + int flags, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + struct rocker_group_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = rocker_kcalloc(trans, flags, + group_count, sizeof(u32)); + if (!entry->group_ids) { + rocker_kfree(trans, entry); + return -ENOMEM; + } + memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); + + return rocker_group_tbl_do(rocker_port, trans, flags, entry); +} + +static int rocker_group_l2_flood(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + return rocker_group_l2_fan_out(rocker_port, trans, flags, + group_count, group_ids, + group_id); +} + +static int rocker_group_l3_unicast(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + u32 index, const u8 *src_mac, const u8 *dst_mac, + __be16 vlan_id, bool ttl_check, u32 pport) +{ + struct rocker_group_tbl_entry *entry; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return rocker_group_tbl_do(rocker_port, trans, flags, entry); +} + +static struct rocker_neigh_tbl_entry * +rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr) +{ + struct rocker_neigh_tbl_entry *found; + + hash_for_each_possible(rocker->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void _rocker_neigh_add(struct rocker *rocker, + struct switchdev_trans *trans, + struct rocker_neigh_tbl_entry *entry) +{ + if (!switchdev_trans_ph_commit(trans)) + entry->index = rocker->neigh_tbl_next_index++; + if (switchdev_trans_ph_prepare(trans)) + return; + entry->ref_count++; + hash_add(rocker->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void _rocker_neigh_del(struct switchdev_trans *trans, + struct rocker_neigh_tbl_entry *entry) +{ + if (switchdev_trans_ph_prepare(trans)) + return; + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + rocker_kfree(trans, entry); + } +} + +static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry, + struct switchdev_trans *trans, + const u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else if 
(!switchdev_trans_ph_prepare(trans)) { + entry->ref_count++; + } +} + +static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + int flags, __be32 ip_addr, const u8 *eth_dst) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + _rocker_neigh_add(rocker, trans, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + _rocker_neigh_del(trans, found); + } else if (updating) { + _rocker_neigh_update(found, trans, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses the L3 unicast + * group. The L3 unicast group can also be referred to by + * other routes' nexthops. + */ + + err = rocker_group_l3_unicast(rocker_port, trans, flags, + entry->index, + rocker_port->dev->dev_addr, + entry->eth_dst, + rocker_port->internal_vlan_id, + entry->ttl_check, + rocker_port->pport); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) L3 unicast group index %d\n", + err, entry->index); + goto err_out; + } + + if (adding || removing) { + group_id = ROCKER_GROUP_L3_UNICAST(entry->index); + err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, + eth_type, ip_addr, + inet_make_mask(32), + priority, goto_tbl, + group_id, flags); + + if (err) + netdev_err(rocker_port->dev, + "Error (%d) /32 unicast route %pI4 group 0x%08x\n", + err, &entry->ip_addr, group_id); + } + +err_out: + if (!adding) + rocker_kfree(trans, entry); + + return err; +} + +static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + __be32 ip_addr) +{ + struct net_device *dev = rocker_port->dev; + struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); + int err = 0; + + if (!n) { + n = neigh_create(&arp_tbl, &ip_addr, dev); + if (IS_ERR(n)) + return IS_ERR(n); + } + + /* If the neigh is already resolved, then go ahead and + * install the entry, otherwise start the ARP process to + * resolve the neigh. 
+ */ + + if (n->nud_state & NUD_VALID) + err = rocker_port_ipv4_neigh(rocker_port, trans, 0, + ip_addr, n->ha); + else + neigh_event_send(n, NULL); + + neigh_release(n); + return err; +} + +static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + __be32 ip_addr, u32 *index) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_neigh_tbl_entry *entry; + struct rocker_neigh_tbl_entry *found; + unsigned long lock_flags; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + bool updating; + bool removing; + bool resolved = true; + int err = 0; + + entry = rocker_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); + + found = rocker_neigh_tbl_find(rocker, ip_addr); + if (found) + *index = found->index; + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = rocker_port->dev; + _rocker_neigh_add(rocker, trans, entry); + *index = entry->index; + resolved = false; + } else if (removing) { + _rocker_neigh_del(trans, found); + } else if (updating) { + _rocker_neigh_update(found, trans, NULL, false); + resolved = !is_zero_ether_addr(found->eth_dst); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); + + if (!adding) + rocker_kfree(trans, entry); + + if (err) + return err; + + /* Resolved means neigh ip_addr is resolved to neigh mac. */ + + if (!resolved) + err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr); + + return err; +} + +static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + int flags, __be16 vlan_id) +{ + struct rocker_port *p; + const struct rocker *rocker = rocker_port->rocker; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 *group_ids; + u8 group_count = 0; + int err = 0; + int i; + + group_ids = rocker_kcalloc(trans, flags, + rocker->port_count, sizeof(u32)); + if (!group_ids) + return -ENOMEM; + + /* Adjust the flood group for this VLAN. The flood group + * references an L2 interface group for each port in this + * VLAN. + */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (!p) + continue; + if (!rocker_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + goto no_ports_in_vlan; + + err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id, + group_count, group_ids, group_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + +no_ports_in_vlan: + rocker_kfree(trans, group_ids); + return err; +} + +static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, bool pop_vlan) +{ + const struct rocker *rocker = rocker_port->rocker; + struct rocker_port *p; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + u32 out_pport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. 
+ */ + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) { + out_pport = rocker_port->pport; + err = rocker_group_l2_interface(rocker_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. + */ + + for (i = 0; i < rocker->port_count; i++) { + p = rocker->ports[i]; + if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_pport = 0; + err = rocker_group_l2_interface(rocker_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct rocker_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} rocker_ctrls[] = { + [ROCKER_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [ROCKER_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [ROCKER_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, + [ROCKER_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, +}; + +static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + const struct rocker_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport = rocker_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; + const u8 *eth_src = NULL; + const u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + int err; + + err = rocker_flow_tbl_acl(rocker_port, trans, flags, + in_pport, in_pport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + int flags, + const struct rocker_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + err = rocker_flow_tbl_bridge(rocker_port, trans, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, 
group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + const struct rocker_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + err = rocker_flow_tbl_term_mac(rocker_port, trans, + rocker_port->pport, in_pport_mask, + ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + const struct rocker_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags, + ctrl, vlan_id); + + if (ctrl->term) + return rocker_port_ctrl_vlan_term(rocker_port, trans, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (rocker_port->ctrls[i]) { + err = rocker_port_ctrl_vlan(rocker_port, trans, flags, + &rocker_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int rocker_port_ctrl(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + const struct rocker_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + err = rocker_port_ctrl_vlan(rocker_port, trans, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int rocker_port_vlan(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_pport = rocker_port->pport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); + + if (adding && test_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && !test_bit(ntohs(internal_vlan_id), + rocker_port->vlan_bitmap)) + return 0; /* already removed */ + + change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); + + if (adding) { + err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port ctrl vlan add\n", err); + goto err_out; + } + } + + err = rocker_port_vlan_l2_groups(rocker_port, trans, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 groups\n", err); + goto err_out; + } + + err = rocker_port_vlan_flood_group(rocker_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 flood group\n", err); + goto err_out; + } + + err = rocker_flow_tbl_vlan(rocker_port, trans, flags, + in_pport, vlan_id, vlan_id_mask, + goto_tbl, untagged, 
internal_vlan_id); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) port VLAN table\n", err); + +err_out: + if (switchdev_trans_ph_prepare(trans)) + change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); + + return err; +} + +static int rocker_port_ig_tbl(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_pport; + u32 in_pport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. + */ + + in_pport = 0; + in_pport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = rocker_flow_tbl_ig_port(rocker_port, trans, flags, + in_pport, in_pport_mask, + goto_tbl); + if (err) + netdev_err(rocker_port->dev, + "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct rocker_fdb_learn_work { + struct work_struct work; + struct rocker_port *rocker_port; + struct switchdev_trans *trans; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void rocker_port_fdb_learn_work(struct work_struct *work) +{ + const struct rocker_fdb_learn_work *lw = + container_of(work, struct rocker_fdb_learn_work, work); + bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); + bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); + struct switchdev_notifier_fdb_info info; + + info.addr = lw->addr; + info.vid = lw->vid; + + rtnl_lock(); + if (learned && removing) + call_switchdev_notifiers(SWITCHDEV_FDB_DEL, + lw->rocker_port->dev, &info.info); + else if (learned && !removing) + call_switchdev_notifiers(SWITCHDEV_FDB_ADD, + lw->rocker_port->dev, &info.info); + rtnl_unlock(); + + rocker_kfree(lw->trans, work); +} + +static int rocker_port_fdb_learn(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + const u8 *addr, __be16 vlan_id) +{ + struct rocker_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_pport = rocker_port->pport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (rocker_port_is_bridged(rocker_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + + if (!(flags & ROCKER_OP_FLAG_REFRESH)) { + err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr, + NULL, vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!rocker_port_is_bridged(rocker_port)) + return 0; + + lw = rocker_kzalloc(trans, flags, sizeof(*lw)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, rocker_port_fdb_learn_work); + + lw->rocker_port = rocker_port; + lw->trans = trans; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); + + if (switchdev_trans_ph_prepare(trans)) + rocker_kfree(trans, lw); + else + schedule_work(&lw->work); + + return 0; +} + +static struct rocker_fdb_tbl_entry * +rocker_fdb_tbl_find(const struct rocker *rocker, + const struct rocker_fdb_tbl_entry *match) +{ + struct rocker_fdb_tbl_entry *found; + + hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int rocker_port_fdb(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct rocker *rocker = rocker_port->rocker; + struct 
rocker_fdb_tbl_entry *fdb; + struct rocker_fdb_tbl_entry *found; + bool removing = (flags & ROCKER_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = rocker_kzalloc(trans, flags, sizeof(*fdb)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); + fdb->touched = jiffies; + fdb->key.rocker_port = rocker_port; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + found = rocker_fdb_tbl_find(rocker, fdb); + + if (found) { + found->touched = jiffies; + if (removing) { + rocker_kfree(trans, fdb); + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + } else if (!removing) { + if (!switchdev_trans_ph_prepare(trans)) + hash_add(rocker->fdb_tbl, &fdb->entry, + fdb->key_crc32); + } + + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + rocker_kfree(trans, fdb); + if (!found && removing) + return 0; + /* Refreshing existing to update aging timers */ + flags |= ROCKER_OP_FLAG_REFRESH; + } + + return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id); +} + +static int rocker_port_fdb_flush(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + unsigned long lock_flags; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (rocker_port->stp_state == BR_STATE_LEARNING || + rocker_port->stp_state == BR_STATE_FORWARDING) + return 0; + + flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.rocker_port != rocker_port) + continue; + if (!found->learned) + continue; + err = rocker_port_fdb_learn(rocker_port, trans, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + return err; +} + +static void rocker_fdb_cleanup(unsigned long data) +{ + struct rocker *rocker = (struct rocker *)data; + struct rocker_port *rocker_port; + struct rocker_fdb_tbl_entry *entry; + struct hlist_node *tmp; + unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; + unsigned long expires; + unsigned long lock_flags; + int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE | + ROCKER_OP_FLAG_LEARNED; + int bkt; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) { + if (!entry->learned) + continue; + rocker_port = entry->key.rocker_port; + expires = entry->touched + rocker_port->ageing_time; + if (time_before_eq(expires, jiffies)) { + rocker_port_fdb_learn(rocker_port, NULL, + flags, entry->key.addr, + entry->key.vlan_id); + hash_del(&entry->entry); + } else if (time_before(expires, next_timer)) { + next_timer = expires; + } + } + + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer)); +} + +static int rocker_port_router_mac(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + 
bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = rocker_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = rocker_flow_tbl_term_mac(rocker_port, trans, + rocker_port->pport, in_pport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = rocker_flow_tbl_term_mac(rocker_port, trans, + rocker_port->pport, in_pport_mask, + eth_type, rocker_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int rocker_port_fwding(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags) +{ + bool pop_vlan; + u32 out_pport; + __be16 vlan_id; + u16 vid; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. + */ + + if (rocker_port->stp_state != BR_STATE_LEARNING && + rocker_port->stp_state != BR_STATE_FORWARDING) + flags |= ROCKER_OP_FLAG_REMOVE; + + out_pport = rocker_port->pport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = rocker_vlan_id_is_internal(vlan_id); + err = rocker_group_l2_interface(rocker_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(rocker_port->dev, + "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + return 0; +} + +static int rocker_port_stp_update(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags, + u8 state) +{ + bool want[ROCKER_CTRL_MAX] = { 0, }; + bool prev_ctrls[ROCKER_CTRL_MAX]; + u8 uninitialized_var(prev_state); + int err; + int i; + + if (switchdev_trans_ph_prepare(trans)) { + memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls)); + prev_state = rocker_port->stp_state; + } + + if (rocker_port->stp_state == state) + return 0; + + rocker_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + if (!rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; + want[ROCKER_CTRL_IPV4_MCAST] = true; + want[ROCKER_CTRL_IPV6_MCAST] = true; + if (rocker_port_is_bridged(rocker_port)) + want[ROCKER_CTRL_DFLT_BRIDGING] = true; + else if (rocker_port_is_ovsed(rocker_port)) + want[ROCKER_CTRL_DFLT_OVS] = true; + else + want[ROCKER_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < ROCKER_CTRL_MAX; i++) { + if (want[i] != rocker_port->ctrls[i]) { + int ctrl_flags = flags | + (want[i] ? 
0 : ROCKER_OP_FLAG_REMOVE); + err = rocker_port_ctrl(rocker_port, trans, ctrl_flags, + &rocker_ctrls[i]); + if (err) + goto err_out; + rocker_port->ctrls[i] = want[i]; + } + } + + err = rocker_port_fdb_flush(rocker_port, trans, flags); + if (err) + goto err_out; + + err = rocker_port_fwding(rocker_port, trans, flags); + +err_out: + if (switchdev_trans_ph_prepare(trans)) { + memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); + rocker_port->stp_state = prev_state; + } + + return err; +} + +static int rocker_port_fwd_enable(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return rocker_port_stp_update(rocker_port, trans, flags, + BR_STATE_FORWARDING); +} + +static int rocker_port_fwd_disable(struct rocker_port *rocker_port, + struct switchdev_trans *trans, int flags) +{ + if (rocker_port_is_bridged(rocker_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return rocker_port_stp_update(rocker_port, trans, flags, + BR_STATE_DISABLED); +} + +static struct rocker_internal_vlan_tbl_entry * +rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex) +{ + struct rocker_internal_vlan_tbl_entry *found; + + hash_for_each_possible(rocker->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *entry; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static void +rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port, + int ifindex) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); + + found = rocker_internal_vlan_tbl_find(rocker, ifindex); + if (!found) { + netdev_err(rocker_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, rocker->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); +} + +static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, + struct switchdev_trans *trans, __be32 dst, + int dst_len, const struct fib_info *fi, + u32 
tb_id, int flags) +{ + const struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = rocker_port->internal_vlan_id; + u32 priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == rocker_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = rocker_port_ipv4_nh(rocker_port, trans, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; + + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } + + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } + + err = rocker_port_fwd_enable(rocker_port, NULL, 0); + if (err) + goto err_fwd_enable; + + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); + return 0; + +err_fwd_enable: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; +} + +static int rocker_port_stop(struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + rocker_port_fwd_disable(rocker_port, NULL, + ROCKER_OP_FLAG_NOWAIT); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); + + return 0; +} + +static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if 
(!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } +} + +static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) +{ + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; + + dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; + } + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); + return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; +} + +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (unlikely(!desc_info)) { + if (net_ratelimit()) + netdev_err(dev, "tx ring full when queue awake\n"); + return NETDEV_TX_BUSY; + } + + rocker_desc_cookie_ptr_set(desc_info, skb); + + frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS); + if (!frags) + goto out; + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb->data, skb_headlen(skb)); + if (err) + goto nest_cancel; + if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) { + err = skb_linearize(skb); + if (err) + goto unmap_frags; + } + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + err = rocker_tx_desc_frag_map_put(rocker_port, desc_info, + skb_frag_address(frag), + skb_frag_size(frag)); + if (err) + goto unmap_frags; + } + rocker_tlv_nest_end(desc_info, frags); + + rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info); + + desc_info = rocker_desc_head_get(&rocker_port->tx_ring); + if (!desc_info) + netif_stop_queue(dev); + + return NETDEV_TX_OK; + +unmap_frags: + rocker_tx_desc_frags_unmap(rocker_port, desc_info); +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frags); +out: + dev_kfree_skb(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static int rocker_port_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data); + if (err) + return err; + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + return 0; +} + +static int rocker_port_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int running = netif_running(dev); + int err; + +#define 
ROCKER_PORT_MIN_MTU 68 +#define ROCKER_PORT_MAX_MTU 9000 + + if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU) + return -EINVAL; + + if (running) + rocker_port_stop(dev); + + netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); + dev->mtu = new_mtu; + + err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); + if (err) + return err; + + if (running) + err = rocker_port_open(dev); + + return err; +} + +static int rocker_port_get_phys_port_name(struct net_device *dev, + char *buf, size_t len) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + struct port_name name = { .buf = buf, .len = len }; + int err; + + err = rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_phys_name_proc, + &name); + + return err ? -EOPNOTSUPP : 0; +} + +static int rocker_port_change_proto_down(struct net_device *dev, + bool proto_down) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_port->dev->flags & IFF_UP) + rocker_port_set_enable(rocker_port, !proto_down); + rocker_port->dev->proto_down = proto_down; + return 0; +} + +static void rocker_port_neigh_destroy(struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(n->dev); + int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *)n->primary_key; + + rocker_port_ipv4_neigh(rocker_port, NULL, + flags, ip_addr, n->ha); +} + +static const struct net_device_ops rocker_port_netdev_ops = { + .ndo_open = rocker_port_open, + .ndo_stop = rocker_port_stop, + .ndo_start_xmit = rocker_port_xmit, + .ndo_set_mac_address = rocker_port_set_mac_address, + .ndo_change_mtu = rocker_port_change_mtu, + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, + .ndo_fdb_add = switchdev_port_fdb_add, + .ndo_fdb_del = switchdev_port_fdb_del, + .ndo_fdb_dump = switchdev_port_fdb_dump, + .ndo_get_phys_port_name = rocker_port_get_phys_port_name, + .ndo_change_proto_down = rocker_port_change_proto_down, + .ndo_neigh_destroy = rocker_port_neigh_destroy, +}; + +/******************** + * swdev interface + ********************/ + +static int rocker_port_attr_get(struct net_device *dev, + struct switchdev_attr *attr) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + const struct rocker *rocker = rocker_port->rocker; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = sizeof(rocker->hw.id); + memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + attr->u.brport_flags = rocker_port->brport_flags; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + unsigned long brport_flags) +{ + unsigned long orig_flags; + int err = 0; + + orig_flags = rocker_port->brport_flags; + rocker_port->brport_flags = brport_flags; + if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING) + err = rocker_port_set_learning(rocker_port, trans); + + if (switchdev_trans_ph_prepare(trans)) + rocker_port->brport_flags = orig_flags; + + return err; +} + +static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + u32 ageing_time) +{ + if (!switchdev_trans_ph_prepare(trans)) { + rocker_port->ageing_time = clock_t_to_jiffies(ageing_time); + 
mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies); + } + + return 0; +} + +static int rocker_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + err = rocker_port_stp_update(rocker_port, trans, 0, + attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + err = rocker_port_brport_flags_set(rocker_port, trans, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: + err = rocker_port_bridge_ageing_time(rocker_port, trans, + attr->u.ageing_time); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_vlan_add(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + u16 vid, u16 flags) +{ + int err; + + /* XXX deal with flags for PVID and untagged */ + + err = rocker_port_vlan(rocker_port, trans, 0, vid); + if (err) + return err; + + err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); + if (err) + rocker_port_vlan(rocker_port, trans, + ROCKER_OP_FLAG_REMOVE, vid); + + return err; +} + +static int rocker_port_vlans_add(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + const struct switchdev_obj_port_vlan *vlan) +{ + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = rocker_port_vlan_add(rocker_port, trans, + vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int rocker_port_fdb_add(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + const struct switchdev_obj_port_fdb *fdb) +{ + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); + int flags = 0; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); +} + +static int rocker_port_obj_add(struct net_device *dev, + const struct switchdev_obj *obj, + struct switchdev_trans *trans) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + const struct switchdev_obj_ipv4_fib *fib4; + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_port_vlans_add(rocker_port, trans, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); + err = rocker_port_fib_ipv4(rocker_port, trans, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, 0); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_port_fdb_add(rocker_port, trans, + SWITCHDEV_OBJ_PORT_FDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_vlan_del(struct rocker_port *rocker_port, + u16 vid, u16 flags) +{ + int err; + + err = rocker_port_router_mac(rocker_port, NULL, + ROCKER_OP_FLAG_REMOVE, htons(vid)); + if (err) + return err; + + return rocker_port_vlan(rocker_port, NULL, + ROCKER_OP_FLAG_REMOVE, vid); +} + +static int rocker_port_vlans_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = rocker_port_vlan_del(rocker_port, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int rocker_port_fdb_del(struct rocker_port *rocker_port, + struct switchdev_trans *trans, + const struct switchdev_obj_port_fdb *fdb) +{ + __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); + int 
flags = ROCKER_OP_FLAG_REMOVE; + + if (!rocker_port_is_bridged(rocker_port)) + return -EINVAL; + + return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); +} + +static int rocker_port_obj_del(struct net_device *dev, + const struct switchdev_obj *obj) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + const struct switchdev_obj_ipv4_fib *fib4; + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_port_vlans_del(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj)); + break; + case SWITCHDEV_OBJ_ID_IPV4_FIB: + fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); + err = rocker_port_fib_ipv4(rocker_port, NULL, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, + ROCKER_OP_FLAG_REMOVE); + break; + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_port_fdb_del(rocker_port, NULL, + SWITCHDEV_OBJ_PORT_FDB(obj)); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker *rocker = rocker_port->rocker; + struct rocker_fdb_tbl_entry *found; + struct hlist_node *tmp; + unsigned long lock_flags; + int bkt; + int err = 0; + + spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); + hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.rocker_port != rocker_port) + continue; + ether_addr_copy(fdb->addr, found->key.addr); + fdb->ndm_state = NUD_REACHABLE; + fdb->vid = rocker_port_vlan_to_vid(rocker_port, + found->key.vlan_id); + err = cb(&fdb->obj); + if (err) + break; + } + spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); + + return err; +} + +static int rocker_port_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, rocker_port->vlan_bitmap)) + continue; + vlan->flags = 0; + if (rocker_vlan_id_is_internal(htons(vid))) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + vlan->vid_begin = vid; + vlan->vid_end = vid; + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +static int rocker_port_obj_dump(struct net_device *dev, + struct switchdev_obj *obj, + switchdev_obj_dump_cb_t *cb) +{ + const struct rocker_port *rocker_port = netdev_priv(dev); + int err = 0; + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_FDB: + err = rocker_port_fdb_dump(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), cb); + break; + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = rocker_port_vlan_dump(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), cb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static const struct switchdev_ops rocker_port_switchdev_ops = { + .switchdev_port_attr_get = rocker_port_attr_get, + .switchdev_port_attr_set = rocker_port_attr_set, + .switchdev_port_obj_add = rocker_port_obj_add, + .switchdev_port_obj_del = rocker_port_obj_del, + .switchdev_port_obj_dump = rocker_port_obj_dump, +}; + +/******************** + * ethtool interface + ********************/ + +static int rocker_port_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd); +} + +static int rocker_port_set_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + return rocker_cmd_set_port_settings_ethtool(rocker_port, 
ecmd); +} + +static void rocker_port_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *drvinfo) +{ + strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); +} + +static struct rocker_port_stats { + char str[ETH_GSTRING_LEN]; + int type; +} rocker_port_stats[] = { + { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, }, + { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, }, + { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, }, + { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, }, + + { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, }, + { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, }, + { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, }, + { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, }, +}; + +#define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats) + +static void rocker_port_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int +rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct rocker_tlv *cmd_stats; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, + ROCKER_TLV_CMD_TYPE_GET_PORT_STATS)) + return -EMSGSIZE; + + cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_stats) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT, + rocker_port->pport)) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, cmd_stats); + + return 0; +} + +static int +rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1]; + const struct rocker_tlv *pattr; + u32 pport; + u64 *data = priv; + int i; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + + if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]) + return -EIO; + + pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]); + if (pport != rocker_port->pport) + return -EIO; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) { + pattr = stats_attrs[rocker_port_stats[i].type]; + if (!pattr) + continue; + + data[i] = rocker_tlv_get_u64(pattr); + } + + return 0; +} + +static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, + void *priv) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_get_port_stats_prep, NULL, + rocker_cmd_get_port_stats_ethtool_proc, + priv); +} + +static void rocker_port_get_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + + if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) { + int i; + + for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i) + data[i] = 0; + } +} + +static int rocker_port_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ROCKER_PORT_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static const struct ethtool_ops 
rocker_port_ethtool_ops = { + .get_settings = rocker_port_get_settings, + .set_settings = rocker_port_set_settings, + .get_drvinfo = rocker_port_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = rocker_port_get_strings, + .get_ethtool_stats = rocker_port_get_stats, + .get_sset_count = rocker_port_get_sset_count, +}; + +/***************** + * NAPI interface + *****************/ + +static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_tx); +} + +static int rocker_port_poll_tx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi); + const struct rocker *rocker = rocker_port->rocker; + const struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Cleanup tx descriptors */ + while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) { + struct sk_buff *skb; + + err = rocker_desc_err(desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "tx desc received with err %d\n", + err); + rocker_tx_desc_frags_unmap(rocker_port, desc_info); + + skb = rocker_desc_cookie_ptr_get(desc_info); + if (err == 0) { + rocker_port->dev->stats.tx_packets++; + rocker_port->dev->stats.tx_bytes += skb->len; + } else { + rocker_port->dev->stats.tx_errors++; + } + + dev_kfree_skb_any(skb); + credits++; + } + + if (credits && netif_queue_stopped(rocker_port->dev)) + netif_wake_queue(rocker_port->dev); + + napi_complete(napi); + rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits); + + return 0; +} + +static int rocker_port_rx_proc(const struct rocker *rocker, + const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info) +{ + const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1]; + struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info); + size_t rx_len; + u16 rx_flags = 0; + + if (!skb) + return -ENOENT; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info); + if (!attrs[ROCKER_TLV_RX_FRAG_LEN]) + return -EINVAL; + if (attrs[ROCKER_TLV_RX_FLAGS]) + rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]); + + rocker_dma_rx_ring_skb_unmap(rocker, attrs); + + rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]); + skb_put(skb, rx_len); + skb->protocol = eth_type_trans(skb, rocker_port->dev); + + if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD) + skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark; + + rocker_port->dev->stats.rx_packets++; + rocker_port->dev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info); +} + +static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi) +{ + return container_of(napi, struct rocker_port, napi_rx); +} + +static int rocker_port_poll_rx(struct napi_struct *napi, int budget) +{ + struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi); + const struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + u32 credits = 0; + int err; + + /* Process rx descriptors */ + while (credits < budget && + (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) { + err = rocker_desc_err(desc_info); + if (err) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "rx desc received with err %d\n", + err); + } else { + err = rocker_port_rx_proc(rocker, rocker_port, + desc_info); + if (err && net_ratelimit()) + netdev_err(rocker_port->dev, "rx processing failed with err %d\n", + err); + } + if (err) + rocker_port->dev->stats.rx_errors++; + + 
rocker_desc_gen_clear(desc_info); + rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info); + credits++; + } + + if (credits < budget) + napi_complete(napi); + + rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits); + + return credits; +} + +/***************** + * PCI driver ops + *****************/ + +static void rocker_carrier_init(const struct rocker_port *rocker_port) +{ + const struct rocker *rocker = rocker_port->rocker; + u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS); + bool link_up; + + link_up = link_status & (1 << rocker_port->pport); + if (link_up) + netif_carrier_on(rocker_port->dev); + else + netif_carrier_off(rocker_port->dev); +} + +static void rocker_remove_ports(const struct rocker *rocker) +{ + struct rocker_port *rocker_port; + int i; + + for (i = 0; i < rocker->port_count; i++) { + rocker_port = rocker->ports[i]; + if (!rocker_port) + continue; + rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); + unregister_netdev(rocker_port->dev); + free_netdev(rocker_port->dev); + } + kfree(rocker->ports); +} + +static void rocker_port_dev_addr_init(struct rocker_port *rocker_port) +{ + const struct rocker *rocker = rocker_port->rocker; + const struct pci_dev *pdev = rocker->pdev; + int err; + + err = rocker_cmd_get_port_settings_macaddr(rocker_port, + rocker_port->dev->dev_addr); + if (err) { + dev_warn(&pdev->dev, "failed to get mac address, using random\n"); + eth_hw_addr_random(rocker_port->dev); + } +} + +static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) +{ + const struct pci_dev *pdev = rocker->pdev; + struct rocker_port *rocker_port; + struct net_device *dev; + u16 untagged_vid = 0; + int err; + + dev = alloc_etherdev(sizeof(struct rocker_port)); + if (!dev) + return -ENOMEM; + rocker_port = netdev_priv(dev); + rocker_port->dev = dev; + rocker_port->rocker = rocker; + rocker_port->port_number = port_number; + rocker_port->pport = port_number + 1; + rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; + rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME; + + rocker_port_dev_addr_init(rocker_port); + dev->netdev_ops = &rocker_port_netdev_ops; + dev->ethtool_ops = &rocker_port_ethtool_ops; + dev->switchdev_ops = &rocker_port_switchdev_ops; + netif_tx_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx, + NAPI_POLL_WEIGHT); + netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx, + NAPI_POLL_WEIGHT); + rocker_carrier_init(rocker_port); + + dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "register_netdev failed\n"); + goto err_register_netdev; + } + rocker->ports[port_number] = rocker_port; + + switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); + + rocker_port_set_learning(rocker_port, NULL); + + err = rocker_port_ig_tbl(rocker_port, NULL, 0); + if (err) { + netdev_err(rocker_port->dev, "install ig port table failed\n"); + goto err_port_ig_tbl; + } + + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); + + err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); + if (err) { + netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); + goto err_untagged_vlan; + } + + return 0; + +err_untagged_vlan: + rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); +err_port_ig_tbl: + rocker->ports[port_number] = NULL; + unregister_netdev(dev); +err_register_netdev: + free_netdev(dev); + return err; +} + +static int rocker_probe_ports(struct rocker 
*rocker) +{ + int i; + size_t alloc_size; + int err; + + alloc_size = sizeof(struct rocker_port *) * rocker->port_count; + rocker->ports = kzalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; + for (i = 0; i < rocker->port_count; i++) { + err = rocker_probe_port(rocker, i); + if (err) + goto remove_ports; + } + return 0; + +remove_ports: + rocker_remove_ports(rocker); + return err; +} + +static int rocker_msix_init(struct rocker *rocker) +{ + struct pci_dev *pdev = rocker->pdev; + int msix_entries; + int i; + int err; + + msix_entries = pci_msix_vec_count(pdev); + if (msix_entries < 0) + return msix_entries; + + if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count)) + return -EINVAL; + + rocker->msix_entries = kmalloc_array(msix_entries, + sizeof(struct msix_entry), + GFP_KERNEL); + if (!rocker->msix_entries) + return -ENOMEM; + + for (i = 0; i < msix_entries; i++) + rocker->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries); + if (err < 0) + goto err_enable_msix; + + return 0; + +err_enable_msix: + kfree(rocker->msix_entries); + return err; +} + +static void rocker_msix_fini(const struct rocker *rocker) +{ + pci_disable_msix(rocker->pdev); + kfree(rocker->msix_entries); +} + +static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct rocker *rocker; + int err; + + rocker = kzalloc(sizeof(*rocker), GFP_KERNEL); + if (!rocker) + return -ENOMEM; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "pci_enable_device failed\n"); + goto err_pci_enable_device; + } + + err = pci_request_regions(pdev, rocker_driver_name); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_pci_request_regions; + } + + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (!err) { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) { + dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } else { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, "pci_set_dma_mask failed\n"); + goto err_pci_set_dma_mask; + } + } + + if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) { + dev_err(&pdev->dev, "invalid PCI region size\n"); + err = -EINVAL; + goto err_pci_resource_len_check; + } + + rocker->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (!rocker->hw_addr) { + dev_err(&pdev->dev, "ioremap failed\n"); + err = -EIO; + goto err_ioremap; + } + pci_set_master(pdev); + + rocker->pdev = pdev; + pci_set_drvdata(pdev, rocker); + + rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT); + + err = rocker_msix_init(rocker); + if (err) { + dev_err(&pdev->dev, "MSI-X init failed\n"); + goto err_msix_init; + } + + err = rocker_basic_hw_test(rocker); + if (err) { + dev_err(&pdev->dev, "basic hw test failed\n"); + goto err_basic_hw_test; + } + + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + + err = rocker_dma_rings_init(rocker); + if (err) + goto err_dma_rings_init; + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), + rocker_cmd_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign cmd irq\n"); + goto err_request_cmd_irq; + } + + err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), + rocker_event_irq_handler, 0, + rocker_driver_name, rocker); + if (err) { + dev_err(&pdev->dev, "cannot assign event irq\n"); + goto err_request_event_irq; + } + + rocker->hw.id = 
rocker_read64(rocker, SWITCH_ID); + + err = rocker_init_tbls(rocker); + if (err) { + dev_err(&pdev->dev, "cannot init rocker tables\n"); + goto err_init_tbls; + } + + setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup, + (unsigned long) rocker); + mod_timer(&rocker->fdb_cleanup_timer, jiffies); + + err = rocker_probe_ports(rocker); + if (err) { + dev_err(&pdev->dev, "failed to probe ports\n"); + goto err_probe_ports; + } + + dev_info(&pdev->dev, "Rocker switch with id %*phN\n", + (int)sizeof(rocker->hw.id), &rocker->hw.id); + + return 0; + +err_probe_ports: + del_timer_sync(&rocker->fdb_cleanup_timer); + rocker_free_tbls(rocker); +err_init_tbls: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); +err_request_event_irq: + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); +err_request_cmd_irq: + rocker_dma_rings_fini(rocker); +err_dma_rings_init: +err_basic_hw_test: + rocker_msix_fini(rocker); +err_msix_init: + iounmap(rocker->hw_addr); +err_ioremap: +err_pci_resource_len_check: +err_pci_set_dma_mask: + pci_release_regions(pdev); +err_pci_request_regions: + pci_disable_device(pdev); +err_pci_enable_device: + kfree(rocker); + return err; +} + +static void rocker_remove(struct pci_dev *pdev) +{ + struct rocker *rocker = pci_get_drvdata(pdev); + + del_timer_sync(&rocker->fdb_cleanup_timer); + rocker_free_tbls(rocker); + rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); + rocker_remove_ports(rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); + free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); + rocker_dma_rings_fini(rocker); + rocker_msix_fini(rocker); + iounmap(rocker->hw_addr); + pci_release_regions(rocker->pdev); + pci_disable_device(rocker->pdev); + kfree(rocker); +} + +static struct pci_driver rocker_pci_driver = { + .name = rocker_driver_name, + .id_table = rocker_pci_id_table, + .probe = rocker_probe, + .remove = rocker_remove, +}; + +/************************************ + * Net device notifier event handler + ************************************/ + +static bool rocker_port_dev_check(const struct net_device *dev) +{ + return dev->netdev_ops == &rocker_port_netdev_ops; +} + +static int rocker_port_bridge_join(struct rocker_port *rocker_port, + struct net_device *bridge) +{ + u16 untagged_vid = 0; + int err; + + /* Port is joining bridge, so the internal VLAN for the + * port is going to change to the bridge internal VLAN. + * Let's remove untagged VLAN (vid=0) from port and + * re-add once internal VLAN has changed. 
+ */ + + err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); + if (err) + return err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->dev->ifindex); + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); + + rocker_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); + + return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); +} + +static int rocker_port_bridge_leave(struct rocker_port *rocker_port) +{ + u16 untagged_vid = 0; + int err; + + err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); + if (err) + return err; + + rocker_port_internal_vlan_id_put(rocker_port, + rocker_port->bridge_dev->ifindex); + rocker_port->internal_vlan_id = + rocker_port_internal_vlan_id_get(rocker_port, + rocker_port->dev->ifindex); + + switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, + false); + rocker_port->bridge_dev = NULL; + + err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); + if (err) + return err; + + if (rocker_port->dev->flags & IFF_UP) + err = rocker_port_fwd_enable(rocker_port, NULL, 0); + + return err; +} + +static int rocker_port_ovs_changed(struct rocker_port *rocker_port, + struct net_device *master) +{ + int err; + + rocker_port->bridge_dev = master; + + err = rocker_port_fwd_disable(rocker_port, NULL, 0); + if (err) + return err; + err = rocker_port_fwd_enable(rocker_port, NULL, 0); + + return err; +} + +static int rocker_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + int err = 0; + + if (netif_is_bridge_master(master)) + err = rocker_port_bridge_join(rocker_port, master); + else if (netif_is_ovs_master(master)) + err = rocker_port_ovs_changed(rocker_port, master); + return err; +} + +static int rocker_port_master_unlinked(struct rocker_port *rocker_port) +{ + int err = 0; + + if (rocker_port_is_bridged(rocker_port)) + err = rocker_port_bridge_leave(rocker_port); + else if (rocker_port_is_ovsed(rocker_port)) + err = rocker_port_ovs_changed(rocker_port, NULL); + return err; +} + +static int rocker_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + struct rocker_port *rocker_port; + int err; + + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + if (!info->master) + goto out; + rocker_port = netdev_priv(dev); + if (info->linking) { + err = rocker_port_master_linked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master linked (err %d)\n", + err); + } else { + err = rocker_port_master_unlinked(rocker_port); + if (err) + netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", + err); + } + break; + } +out: + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netdevice_nb __read_mostly = { + .notifier_call = rocker_netdevice_event, +}; + +/************************************ + * Net event notifier event handler + ************************************/ + +static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) +{ + struct rocker_port *rocker_port = netdev_priv(dev); + int flags = (n->nud_state & NUD_VALID ? 
0 : ROCKER_OP_FLAG_REMOVE) | + ROCKER_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *)n->primary_key; + + return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha); +} + +static int rocker_netevent_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev; + struct neighbour *n = ptr; + int err; + + switch (event) { + case NETEVENT_NEIGH_UPDATE: + if (n->tbl != &arp_tbl) + return NOTIFY_DONE; + dev = n->dev; + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + err = rocker_neigh_update(dev, n); + if (err) + netdev_warn(dev, + "failed to handle neigh update (err %d)\n", + err); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block rocker_netevent_nb __read_mostly = { + .notifier_call = rocker_netevent_event, +}; + +/*********************** + * Module init and exit + ***********************/ + +static int __init rocker_module_init(void) +{ + int err; + + register_netdevice_notifier(&rocker_netdevice_nb); + register_netevent_notifier(&rocker_netevent_nb); + err = pci_register_driver(&rocker_pci_driver); + if (err) + goto err_pci_register_driver; + return 0; + +err_pci_register_driver: + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + return err; +} + +static void __exit rocker_module_exit(void) +{ + unregister_netevent_notifier(&rocker_netevent_nb); + unregister_netdevice_notifier(&rocker_netdevice_nb); + pci_unregister_driver(&rocker_pci_driver); +} + +module_init(rocker_module_init); +module_exit(rocker_module_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jiri Pirko "); +MODULE_AUTHOR("Scott Feldman "); +MODULE_DESCRIPTION("Rocker switch device driver"); +MODULE_DEVICE_TABLE(pci, rocker_pci_id_table); -- cgit v1.2.3 From de1521923c0f2ee9c6328ad6b3e46e662d88a3cd Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:42 +0100 Subject: rocker: push tlv processing into separate files Carve out TLV processing helpers into separate files. Signed-off-by: Jiri Pirko Acked-by: Scott Feldman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/Makefile | 2 +- drivers/net/ethernet/rocker/rocker.h | 27 ++++ drivers/net/ethernet/rocker/rocker_main.c | 222 +----------------------------- drivers/net/ethernet/rocker/rocker_tlv.c | 53 +++++++ drivers/net/ethernet/rocker/rocker_tlv.h | 201 +++++++++++++++++++++++++++ 5 files changed, 284 insertions(+), 221 deletions(-) create mode 100644 drivers/net/ethernet/rocker/rocker.h create mode 100644 drivers/net/ethernet/rocker/rocker_tlv.c create mode 100644 drivers/net/ethernet/rocker/rocker_tlv.h (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile index 2bed42385076..47f34928f809 100644 --- a/drivers/net/ethernet/rocker/Makefile +++ b/drivers/net/ethernet/rocker/Makefile @@ -3,4 +3,4 @@ # obj-$(CONFIG_ROCKER) += rocker.o -rocker-y := rocker_main.o +rocker-y := rocker_main.o rocker_tlv.o diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h new file mode 100644 index 000000000000..23b5e3dfea45 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker.h @@ -0,0 +1,27 @@ +/* + * drivers/net/ethernet/rocker/rocker.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _ROCKER_H +#define _ROCKER_H + +#include + +#include "rocker_hw.h" + +struct rocker_desc_info { + char *data; /* mapped */ + size_t data_size; + size_t tlv_size; + struct rocker_desc *desc; + dma_addr_t mapaddr; +}; + +#endif diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 44fa9ac36d85..eecbbd59acc1 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -40,6 +40,8 @@ #include #include "rocker_hw.h" +#include "rocker.h" +#include "rocker_tlv.h" static const char rocker_driver_name[] = "rocker"; @@ -177,14 +179,6 @@ struct rocker_neigh_tbl_entry { bool ttl_check; }; -struct rocker_desc_info { - char *data; /* mapped */ - size_t data_size; - size_t tlv_size; - struct rocker_desc *desc; - dma_addr_t mapaddr; -}; - struct rocker_dma_ring_info { size_t size; u32 head; @@ -661,218 +655,6 @@ free_irq: return err; } -/****** - * TLV - ******/ - -#define ROCKER_TLV_ALIGNTO 8U -#define ROCKER_TLV_ALIGN(len) \ - (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) -#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) - -/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> - * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ - * | Header | Pad | Payload | Pad | - * | (struct rocker_tlv) | ing | | ing | - * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ - * <--------------------------- tlv->len --------------------------> - */ - -static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, - int *remaining) -{ - int totlen = ROCKER_TLV_ALIGN(tlv->len); - - *remaining -= totlen; - return (struct rocker_tlv *) ((char *) tlv + totlen); -} - -static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) -{ - return remaining >= (int) ROCKER_TLV_HDRLEN && - tlv->len >= ROCKER_TLV_HDRLEN && - tlv->len <= remaining; -} - -#define rocker_tlv_for_each(pos, head, len, rem) \ - 
for (pos = head, rem = len; \ - rocker_tlv_ok(pos, rem); \ - pos = rocker_tlv_next(pos, &(rem))) - -#define rocker_tlv_for_each_nested(pos, tlv, rem) \ - rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ - rocker_tlv_len(tlv), rem) - -static int rocker_tlv_attr_size(int payload) -{ - return ROCKER_TLV_HDRLEN + payload; -} - -static int rocker_tlv_total_size(int payload) -{ - return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); -} - -static int rocker_tlv_padlen(int payload) -{ - return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); -} - -static int rocker_tlv_type(const struct rocker_tlv *tlv) -{ - return tlv->type; -} - -static void *rocker_tlv_data(const struct rocker_tlv *tlv) -{ - return (char *) tlv + ROCKER_TLV_HDRLEN; -} - -static int rocker_tlv_len(const struct rocker_tlv *tlv) -{ - return tlv->len - ROCKER_TLV_HDRLEN; -} - -static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) -{ - return *(u8 *) rocker_tlv_data(tlv); -} - -static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) -{ - return *(u16 *) rocker_tlv_data(tlv); -} - -static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) -{ - return *(__be16 *) rocker_tlv_data(tlv); -} - -static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) -{ - return *(u32 *) rocker_tlv_data(tlv); -} - -static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) -{ - return *(u64 *) rocker_tlv_data(tlv); -} - -static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, - const char *buf, int buf_len) -{ - const struct rocker_tlv *tlv; - const struct rocker_tlv *head = (const struct rocker_tlv *) buf; - int rem; - - memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); - - rocker_tlv_for_each(tlv, head, buf_len, rem) { - u32 type = rocker_tlv_type(tlv); - - if (type > 0 && type <= maxtype) - tb[type] = tlv; - } -} - -static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype, - const struct rocker_tlv *tlv) -{ - rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), - rocker_tlv_len(tlv)); -} - -static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, - const struct rocker_desc_info *desc_info) -{ - rocker_tlv_parse(tb, maxtype, desc_info->data, - desc_info->desc->tlv_size); -} - -static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info) -{ - return (struct rocker_tlv *) ((char *) desc_info->data + - desc_info->tlv_size); -} - -static int rocker_tlv_put(struct rocker_desc_info *desc_info, - int attrtype, int attrlen, const void *data) -{ - int tail_room = desc_info->data_size - desc_info->tlv_size; - int total_size = rocker_tlv_total_size(attrlen); - struct rocker_tlv *tlv; - - if (unlikely(tail_room < total_size)) - return -EMSGSIZE; - - tlv = rocker_tlv_start(desc_info); - desc_info->tlv_size += total_size; - tlv->type = attrtype; - tlv->len = rocker_tlv_attr_size(attrlen); - memcpy(rocker_tlv_data(tlv), data, attrlen); - memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); - return 0; -} - -static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, - int attrtype, u8 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); -} - -static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, - int attrtype, u16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); -} - -static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, - int attrtype, __be16 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); -} - -static int 
rocker_tlv_put_u32(struct rocker_desc_info *desc_info, - int attrtype, u32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); -} - -static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, - int attrtype, __be32 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); -} - -static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, - int attrtype, u64 value) -{ - return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); -} - -static struct rocker_tlv * -rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) -{ - struct rocker_tlv *start = rocker_tlv_start(desc_info); - - if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) - return NULL; - - return start; -} - -static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, - struct rocker_tlv *start) -{ - start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; -} - -static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, - const struct rocker_tlv *start) -{ - desc_info->tlv_size = (const char *) start - desc_info->data; -} - /****************************************** * DMA rings and descriptors manipulations ******************************************/ diff --git a/drivers/net/ethernet/rocker/rocker_tlv.c b/drivers/net/ethernet/rocker/rocker_tlv.c new file mode 100644 index 000000000000..8185118f3492 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_tlv.c @@ -0,0 +1,53 @@ +/* + * drivers/net/ethernet/rocker/rocker_tlv.c - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include +#include +#include + +#include "rocker_hw.h" +#include "rocker_tlv.h" + +void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len) +{ + const struct rocker_tlv *tlv; + const struct rocker_tlv *head = (const struct rocker_tlv *) buf; + int rem; + + memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1)); + + rocker_tlv_for_each(tlv, head, buf_len, rem) { + u32 type = rocker_tlv_type(tlv); + + if (type > 0 && type <= maxtype) + tb[type] = tlv; + } +} + +int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data) +{ + int tail_room = desc_info->data_size - desc_info->tlv_size; + int total_size = rocker_tlv_total_size(attrlen); + struct rocker_tlv *tlv; + + if (unlikely(tail_room < total_size)) + return -EMSGSIZE; + + tlv = rocker_tlv_start(desc_info); + desc_info->tlv_size += total_size; + tlv->type = attrtype; + tlv->len = rocker_tlv_attr_size(attrlen); + memcpy(rocker_tlv_data(tlv), data, attrlen); + memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen)); + return 0; +} diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h new file mode 100644 index 000000000000..a63ef82e7c72 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_tlv.h @@ -0,0 +1,201 @@ +/* + * drivers/net/ethernet/rocker/rocker_tlv.h - Rocker switch device driver + * Copyright (c) 2014-2016 Jiri Pirko + * Copyright (c) 2014 Scott Feldman + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _ROCKER_TLV_H +#define _ROCKER_TLV_H + +#include + +#include "rocker_hw.h" +#include "rocker.h" + +#define ROCKER_TLV_ALIGNTO 8U +#define ROCKER_TLV_ALIGN(len) \ + (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1)) +#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv)) + +/* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) ---> + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * | Header | Pad | Payload | Pad | + * | (struct rocker_tlv) | ing | | ing | + * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+ + * <--------------------------- tlv->len --------------------------> + */ + +static inline struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv, + int *remaining) +{ + int totlen = ROCKER_TLV_ALIGN(tlv->len); + + *remaining -= totlen; + return (struct rocker_tlv *) ((char *) tlv + totlen); +} + +static inline int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining) +{ + return remaining >= (int) ROCKER_TLV_HDRLEN && + tlv->len >= ROCKER_TLV_HDRLEN && + tlv->len <= remaining; +} + +#define rocker_tlv_for_each(pos, head, len, rem) \ + for (pos = head, rem = len; \ + rocker_tlv_ok(pos, rem); \ + pos = rocker_tlv_next(pos, &(rem))) + +#define rocker_tlv_for_each_nested(pos, tlv, rem) \ + rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \ + rocker_tlv_len(tlv), rem) + +static inline int rocker_tlv_attr_size(int payload) +{ + return ROCKER_TLV_HDRLEN + payload; +} + +static inline int rocker_tlv_total_size(int payload) +{ + return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload)); +} + +static inline int rocker_tlv_padlen(int payload) +{ + return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload); +} + +static inline int rocker_tlv_type(const struct 
rocker_tlv *tlv) +{ + return tlv->type; +} + +static inline void *rocker_tlv_data(const struct rocker_tlv *tlv) +{ + return (char *) tlv + ROCKER_TLV_HDRLEN; +} + +static inline int rocker_tlv_len(const struct rocker_tlv *tlv) +{ + return tlv->len - ROCKER_TLV_HDRLEN; +} + +static inline u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv) +{ + return *(u8 *) rocker_tlv_data(tlv); +} + +static inline u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv) +{ + return *(u16 *) rocker_tlv_data(tlv); +} + +static inline __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv) +{ + return *(__be16 *) rocker_tlv_data(tlv); +} + +static inline u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv) +{ + return *(u32 *) rocker_tlv_data(tlv); +} + +static inline u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv) +{ + return *(u64 *) rocker_tlv_data(tlv); +} + +void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype, + const char *buf, int buf_len); + +static inline void rocker_tlv_parse_nested(const struct rocker_tlv **tb, + int maxtype, + const struct rocker_tlv *tlv) +{ + rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv), + rocker_tlv_len(tlv)); +} + +static inline void +rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype, + const struct rocker_desc_info *desc_info) +{ + rocker_tlv_parse(tb, maxtype, desc_info->data, + desc_info->desc->tlv_size); +} + +static inline struct rocker_tlv * +rocker_tlv_start(struct rocker_desc_info *desc_info) +{ + return (struct rocker_tlv *) ((char *) desc_info->data + + desc_info->tlv_size); +} + +int rocker_tlv_put(struct rocker_desc_info *desc_info, + int attrtype, int attrlen, const void *data); + +static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, + int attrtype, u8 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); +} + +static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, + int attrtype, u16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); +} + +static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, + int attrtype, __be16 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); +} + +static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, + int attrtype, u32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); +} + +static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, + int attrtype, __be32 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); +} + +static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, + int attrtype, u64 value) +{ + return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); +} + +static inline struct rocker_tlv * +rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype) +{ + struct rocker_tlv *start = rocker_tlv_start(desc_info); + + if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0) + return NULL; + + return start; +} + +static inline void rocker_tlv_nest_end(struct rocker_desc_info *desc_info, + struct rocker_tlv *start) +{ + start->len = (char *) rocker_tlv_start(desc_info) - (char *) start; +} + +static inline void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info, + const struct rocker_tlv *start) +{ + desc_info->tlv_size = (const char *) start - desc_info->data; +} + +#endif -- cgit v1.2.3 From e1ba3dee77c450fe3cea00a334f30174f5cc0aa1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:43 +0100 Subject: rocker: implement get 
settings mode command Introduce a helper to ask HW for the port mode (world). Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_main.c | 32 +++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index eecbbd59acc1..1a99b358afd0 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1495,6 +1495,30 @@ rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port, return 0; } +static int +rocker_cmd_get_port_settings_mode_proc(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv) +{ + u8 *p_mode = priv; + const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1]; + const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1]; + const struct rocker_tlv *attr; + + rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info); + if (!attrs[ROCKER_TLV_CMD_INFO]) + return -EIO; + + rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX, + attrs[ROCKER_TLV_CMD_INFO]); + attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]; + if (!attr) + return -EIO; + + *p_mode = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]); + return 0; +} + struct port_name { char *buf; size_t len; @@ -1660,6 +1684,14 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, macaddr); } +static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, + u8 *p_mode) +{ + return rocker_cmd_exec(rocker_port, NULL, 0, + rocker_cmd_get_port_settings_prep, NULL, + rocker_cmd_get_port_settings_mode_proc, p_mode); +} + static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { -- cgit v1.2.3 From 0514c4e809a93544253ba5fb797379fa9cc45cce Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:44 +0100 Subject: rocker: move rocker and rocker_port structs into header And take some other related thing along. They are going to be pushed into of-dpa part anyway. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker.h | 75 +++++++++++++++++++++++++++++++ drivers/net/ethernet/rocker/rocker_main.c | 73 ------------------------------ 2 files changed, 75 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 23b5e3dfea45..05c1e1aa2002 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -13,6 +13,8 @@ #define _ROCKER_H #include +#include +#include #include "rocker_hw.h" @@ -24,4 +26,77 @@ struct rocker_desc_info { dma_addr_t mapaddr; }; +struct rocker_dma_ring_info { + size_t size; + u32 head; + u32 tail; + struct rocker_desc *desc; /* mapped */ + dma_addr_t mapaddr; + struct rocker_desc_info *desc_info; + unsigned int type; +}; + +struct rocker; + +enum { + ROCKER_CTRL_LINK_LOCAL_MCAST, + ROCKER_CTRL_LOCAL_ARP, + ROCKER_CTRL_IPV4_MCAST, + ROCKER_CTRL_IPV6_MCAST, + ROCKER_CTRL_DFLT_BRIDGING, + ROCKER_CTRL_DFLT_OVS, + ROCKER_CTRL_MAX, +}; + +#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 +#define ROCKER_N_INTERNAL_VLANS 255 +#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) + +struct rocker_port { + struct net_device *dev; + struct net_device *bridge_dev; + struct rocker *rocker; + unsigned int port_number; + u32 pport; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + unsigned long ageing_time; + bool ctrls[ROCKER_CTRL_MAX]; + unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; + struct napi_struct napi_tx; + struct napi_struct napi_rx; + struct rocker_dma_ring_info tx_ring; + struct rocker_dma_ring_info rx_ring; +}; + +struct rocker { + struct pci_dev *pdev; + u8 __iomem *hw_addr; + struct msix_entry *msix_entries; + unsigned int port_count; + struct rocker_port **ports; + struct { + u64 id; + } hw; + spinlock_t cmd_ring_lock; /* for cmd ring accesses */ + struct rocker_dma_ring_info cmd_ring; + struct rocker_dma_ring_info event_ring; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; /* for flow tbl accesses */ + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; /* for group tbl accesses */ + struct timer_list fdb_cleanup_timer; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ + unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ + u32 neigh_tbl_next_index; +}; + #endif diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 1a99b358afd0..a67a6c7ae57f 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -179,79 +179,6 @@ struct rocker_neigh_tbl_entry { bool ttl_check; }; -struct rocker_dma_ring_info { - size_t size; - u32 head; - u32 tail; - struct rocker_desc *desc; /* mapped */ - dma_addr_t mapaddr; - struct rocker_desc_info *desc_info; - unsigned int type; -}; - -struct rocker; - -enum { - ROCKER_CTRL_LINK_LOCAL_MCAST, - ROCKER_CTRL_LOCAL_ARP, - ROCKER_CTRL_IPV4_MCAST, - ROCKER_CTRL_IPV6_MCAST, - ROCKER_CTRL_DFLT_BRIDGING, - ROCKER_CTRL_DFLT_OVS, - ROCKER_CTRL_MAX, -}; - -#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 -#define ROCKER_N_INTERNAL_VLANS 255 -#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) -#define 
ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) - -struct rocker_port { - struct net_device *dev; - struct net_device *bridge_dev; - struct rocker *rocker; - unsigned int port_number; - u32 pport; - __be16 internal_vlan_id; - int stp_state; - u32 brport_flags; - unsigned long ageing_time; - bool ctrls[ROCKER_CTRL_MAX]; - unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; - struct napi_struct napi_tx; - struct napi_struct napi_rx; - struct rocker_dma_ring_info tx_ring; - struct rocker_dma_ring_info rx_ring; -}; - -struct rocker { - struct pci_dev *pdev; - u8 __iomem *hw_addr; - struct msix_entry *msix_entries; - unsigned int port_count; - struct rocker_port **ports; - struct { - u64 id; - } hw; - spinlock_t cmd_ring_lock; /* for cmd ring accesses */ - struct rocker_dma_ring_info cmd_ring; - struct rocker_dma_ring_info event_ring; - DECLARE_HASHTABLE(flow_tbl, 16); - spinlock_t flow_tbl_lock; /* for flow tbl accesses */ - u64 flow_tbl_next_cookie; - DECLARE_HASHTABLE(group_tbl, 16); - spinlock_t group_tbl_lock; /* for group tbl accesses */ - struct timer_list fdb_cleanup_timer; - DECLARE_HASHTABLE(fdb_tbl, 16); - spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ - unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; - DECLARE_HASHTABLE(internal_vlan_tbl, 8); - spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ - DECLARE_HASHTABLE(neigh_tbl, 16); - spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ - u32 neigh_tbl_next_index; -}; - static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -- cgit v1.2.3 From e420114eef4a3a5025a243b89b0dc343101e3d3c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:45 +0100 Subject: rocker: introduce worlds infrastructure This is another step on the way to per-world clean cut. Introduce world ops hooks which each world can implement in world-specific way. Also introduce world infrastructure along with OF-DPA world stub. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/Makefile | 2 +- drivers/net/ethernet/rocker/rocker.h | 69 +++++ drivers/net/ethernet/rocker/rocker_main.c | 450 ++++++++++++++++++++++++++++- drivers/net/ethernet/rocker/rocker_ofdpa.c | 27 ++ 4 files changed, 545 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/rocker/rocker_ofdpa.c (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/Makefile b/drivers/net/ethernet/rocker/Makefile index 47f34928f809..faa36acee223 100644 --- a/drivers/net/ethernet/rocker/Makefile +++ b/drivers/net/ethernet/rocker/Makefile @@ -3,4 +3,4 @@ # obj-$(CONFIG_ROCKER) += rocker.o -rocker-y := rocker_main.o rocker_tlv.o +rocker-y := rocker_main.o rocker_tlv.o rocker_ofdpa.o diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 05c1e1aa2002..5fd0e3520fd6 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -12,9 +12,13 @@ #ifndef _ROCKER_H #define _ROCKER_H +#include #include #include #include +#include +#include +#include #include "rocker_hw.h" @@ -57,6 +61,7 @@ struct rocker_port { struct net_device *dev; struct net_device *bridge_dev; struct rocker *rocker; + void *wpriv; unsigned int port_number; u32 pport; __be16 internal_vlan_id; @@ -71,6 +76,8 @@ struct rocker_port { struct rocker_dma_ring_info rx_ring; }; +struct rocker_world_ops; + struct rocker { struct pci_dev *pdev; u8 __iomem *hw_addr; @@ -83,6 +90,8 @@ struct rocker { spinlock_t cmd_ring_lock; /* for cmd ring accesses */ struct rocker_dma_ring_info cmd_ring; struct rocker_dma_ring_info event_ring; + struct rocker_world_ops *wops; + void *wpriv; DECLARE_HASHTABLE(flow_tbl, 16); spinlock_t flow_tbl_lock; /* for flow tbl accesses */ u64 flow_tbl_next_cookie; @@ -99,4 +108,64 @@ struct rocker { u32 neigh_tbl_next_index; }; +struct rocker_world_ops { + const char *kind; + size_t priv_size; + size_t port_priv_size; + u8 mode; + int (*init)(struct rocker *rocker); + void (*fini)(struct rocker *rocker); + int (*port_pre_init)(struct rocker_port *rocker_port); + int (*port_init)(struct rocker_port *rocker_port); + void (*port_fini)(struct rocker_port *rocker_port); + void (*port_post_fini)(struct rocker_port *rocker_port); + int (*port_open)(struct rocker_port *rocker_port); + void (*port_stop)(struct rocker_port *rocker_port); + int (*port_attr_stp_state_set)(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans); + int (*port_attr_bridge_flags_set)(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans); + int (*port_attr_bridge_flags_get)(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags); + int (*port_attr_bridge_ageing_time_set)(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans); + int (*port_obj_vlan_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans); + int (*port_obj_vlan_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan); + int (*port_obj_vlan_dump)(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb); + int (*port_obj_fib4_add)(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans); + int (*port_obj_fib4_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4); + int (*port_obj_fdb_add)(struct rocker_port *rocker_port, + const struct 
switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans); + int (*port_obj_fdb_del)(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb); + int (*port_obj_fdb_dump)(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb); + int (*port_master_linked)(struct rocker_port *rocker_port, + struct net_device *master); + int (*port_master_unlinked)(struct rocker_port *rocker_port, + struct net_device *master); + int (*port_neigh_update)(struct rocker_port *rocker_port, + struct neighbour *n); + int (*port_neigh_destroy)(struct rocker_port *rocker_port, + struct neighbour *n); + int (*port_ev_mac_vlan_seen)(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id); +}; + +extern struct rocker_world_ops rocker_ofdpa_ops; + #endif diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index a67a6c7ae57f..871ccbe107fc 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1164,6 +1164,9 @@ static int rocker_port_fdb(struct rocker_port *rocker_port, struct switchdev_trans *trans, const unsigned char *addr, __be16 vlan_id, int flags); +static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id); static int rocker_event_mac_vlan_seen(const struct rocker *rocker, const struct rocker_tlv *info) @@ -1174,6 +1177,7 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker, const unsigned char *addr; int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; __be16 vlan_id; + int err; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || @@ -1190,6 +1194,10 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker, rocker_port = rocker->ports[port_number]; + err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); + if (err) + return err; + if (rocker_port->stp_state != BR_STATE_LEARNING && rocker_port->stp_state != BR_STATE_FORWARDING) return 0; @@ -1651,6 +1659,335 @@ static int rocker_port_set_learning(struct rocker_port *rocker_port, NULL, NULL, NULL); } +/********************** + * Worlds manipulation + **********************/ + +static struct rocker_world_ops *rocker_world_ops[] = { + &rocker_ofdpa_ops, +}; + +#define ROCKER_WORLD_OPS_LEN ARRAY_SIZE(rocker_world_ops) + +static struct rocker_world_ops *rocker_world_ops_find(u8 mode) +{ + int i; + + for (i = 0; i < ROCKER_WORLD_OPS_LEN; i++) + if (rocker_world_ops[i]->mode == mode) + return rocker_world_ops[i]; + return NULL; +} + +static int rocker_world_init(struct rocker *rocker, u8 mode) +{ + struct rocker_world_ops *wops; + int err; + + wops = rocker_world_ops_find(mode); + if (!wops) { + dev_err(&rocker->pdev->dev, "port mode \"%d\" is not supported\n", + mode); + return -EINVAL; + } + rocker->wops = wops; + rocker->wpriv = kzalloc(wops->priv_size, GFP_KERNEL); + if (!rocker->wpriv) + return -ENOMEM; + if (!wops->init) + return 0; + err = wops->init(rocker); + if (err) + kfree(rocker->wpriv); + return err; +} + +static void rocker_world_fini(struct rocker *rocker) +{ + struct rocker_world_ops *wops = rocker->wops; + + if (!wops || !wops->fini) + return; + wops->fini(rocker); + kfree(rocker->wpriv); +} + +static int rocker_world_check_init(struct rocker_port *rocker_port) +{ + struct rocker *rocker = rocker_port->rocker; + u8 mode; + int err; + + err = 
rocker_cmd_get_port_settings_mode(rocker_port, &mode); + if (err) { + dev_err(&rocker->pdev->dev, "failed to get port mode\n"); + return err; + } + if (rocker->wops) { + if (rocker->wops->mode != mode) { + dev_err(&rocker->pdev->dev, "hardware has ports in different worlds, which is not supported\n"); + return err; + } + return 0; + } + return rocker_world_init(rocker, mode); +} + +static int rocker_world_port_pre_init(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + int err; + + rocker_port->wpriv = kzalloc(wops->port_priv_size, GFP_KERNEL); + if (!rocker_port->wpriv) + return -ENOMEM; + if (!wops->port_pre_init) + return 0; + err = wops->port_pre_init(rocker_port); + if (err) + kfree(rocker_port->wpriv); + return 0; +} + +static int rocker_world_port_init(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_init) + return 0; + return wops->port_init(rocker_port); +} + +static void rocker_world_port_fini(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_fini) + return; + wops->port_fini(rocker_port); +} + +static void rocker_world_port_post_fini(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_post_fini) + return; + wops->port_post_fini(rocker_port); + kfree(rocker_port->wpriv); +} + +static int rocker_world_port_open(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_open) + return 0; + return wops->port_open(rocker_port); +} + +static void rocker_world_port_stop(struct rocker_port *rocker_port) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_stop) + return; + wops->port_stop(rocker_port); +} + +static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_stp_state_set) + return 0; + return wops->port_attr_stp_state_set(rocker_port, state, trans); +} + +static int +rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_flags_set) + return 0; + return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, + trans); +} + +static int +rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_flags_get) + return 0; + return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); +} + +static int +rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans) + +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_attr_bridge_ageing_time_set) + return 0; + return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time, + trans); +} + +static int +rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_add) + return 0; + return wops->port_obj_vlan_add(rocker_port, vlan, trans); +} + 
+static int +rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_del) + return 0; + return wops->port_obj_vlan_del(rocker_port, vlan); +} + +static int +rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_vlan_dump) + return 0; + return wops->port_obj_vlan_dump(rocker_port, vlan, cb); +} + +static int +rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fib4_add) + return 0; + return wops->port_obj_fib4_add(rocker_port, fib4, trans); +} + +static int +rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fib4_del) + return 0; + return wops->port_obj_fib4_del(rocker_port, fib4); +} + +static int +rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_add) + return 0; + return wops->port_obj_fdb_add(rocker_port, fdb, trans); +} + +static int +rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_del) + return 0; + return wops->port_obj_fdb_del(rocker_port, fdb); +} + +static int +rocker_world_port_obj_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_obj_fdb_dump) + return 0; + return wops->port_obj_fdb_dump(rocker_port, fdb, cb); +} + +static int rocker_world_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_master_linked) + return 0; + return wops->port_master_linked(rocker_port, master); +} + +static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_master_unlinked) + return 0; + return wops->port_master_unlinked(rocker_port, master); +} + +static int rocker_world_port_neigh_update(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_neigh_update) + return 0; + return wops->port_neigh_update(rocker_port, n); +} + +static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_neigh_destroy) + return 0; + return wops->port_neigh_destroy(rocker_port, n); +} + +static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id) +{ + struct rocker_world_ops *wops = rocker_port->rocker->wops; + + if (!wops->port_ev_mac_vlan_seen) + return 0; + return 
wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); +} + static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, const struct rocker_flow_tbl_entry *entry) @@ -3799,6 +4136,12 @@ static int rocker_port_open(struct net_device *dev) goto err_request_rx_irq; } + err = rocker_world_port_open(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot open port in world\n"); + goto err_world_port_open; + } + err = rocker_port_fwd_enable(rocker_port, NULL, 0); if (err) goto err_fwd_enable; @@ -3811,6 +4154,7 @@ static int rocker_port_open(struct net_device *dev) return 0; err_fwd_enable: +err_world_port_open: free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); err_request_rx_irq: free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); @@ -3827,6 +4171,7 @@ static int rocker_port_stop(struct net_device *dev) rocker_port_set_enable(rocker_port, false); napi_disable(&rocker_port->napi_rx); napi_disable(&rocker_port->napi_tx); + rocker_world_port_stop(rocker_port); rocker_port_fwd_disable(rocker_port, NULL, ROCKER_OP_FLAG_NOWAIT); free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); @@ -4037,9 +4382,14 @@ static void rocker_port_neigh_destroy(struct neighbour *n) struct rocker_port *rocker_port = netdev_priv(n->dev); int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; __be32 ip_addr = *(__be32 *)n->primary_key; + int err; rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha); + err = rocker_world_port_neigh_destroy(rocker_port, n); + if (err) + netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n", + err); } static const struct net_device_ops rocker_port_netdev_ops = { @@ -4068,6 +4418,7 @@ static int rocker_port_attr_get(struct net_device *dev, { const struct rocker_port *rocker_port = netdev_priv(dev); const struct rocker *rocker = rocker_port->rocker; + int err = 0; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: @@ -4076,12 +4427,14 @@ static int rocker_port_attr_get(struct net_device *dev, break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: attr->u.brport_flags = rocker_port->brport_flags; + err = rocker_world_port_attr_bridge_flags_get(rocker_port, + &attr->u.brport_flags); break; default: return -EOPNOTSUPP; } - return 0; + return err; } static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, @@ -4125,14 +4478,29 @@ static int rocker_port_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_PORT_STP_STATE: err = rocker_port_stp_update(rocker_port, trans, 0, attr->u.stp_state); + if (err) + break; + err = rocker_world_port_attr_stp_state_set(rocker_port, + attr->u.stp_state, + trans); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: err = rocker_port_brport_flags_set(rocker_port, trans, attr->u.brport_flags); + if (err) + break; + err = rocker_world_port_attr_bridge_flags_set(rocker_port, + attr->u.brport_flags, + trans); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: err = rocker_port_bridge_ageing_time(rocker_port, trans, attr->u.ageing_time); + if (err) + break; + err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port, + attr->u.ageing_time, + trans); break; default: err = -EOPNOTSUPP; @@ -4204,16 +4572,31 @@ static int rocker_port_obj_add(struct net_device *dev, case SWITCHDEV_OBJ_ID_PORT_VLAN: err = rocker_port_vlans_add(rocker_port, trans, SWITCHDEV_OBJ_PORT_VLAN(obj)); + if (err) + break; + err = rocker_world_port_obj_vlan_add(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + trans); break; case SWITCHDEV_OBJ_ID_IPV4_FIB: fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); err = 
rocker_port_fib_ipv4(rocker_port, trans, htonl(fib4->dst), fib4->dst_len, &fib4->fi, fib4->tb_id, 0); + if (err) + break; + err = rocker_world_port_obj_fib4_add(rocker_port, + SWITCHDEV_OBJ_IPV4_FIB(obj), + trans); break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = rocker_port_fdb_add(rocker_port, trans, SWITCHDEV_OBJ_PORT_FDB(obj)); + if (err) + break; + err = rocker_world_port_obj_fdb_add(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + trans); break; default: err = -EOPNOTSUPP; @@ -4276,6 +4659,10 @@ static int rocker_port_obj_del(struct net_device *dev, case SWITCHDEV_OBJ_ID_PORT_VLAN: err = rocker_port_vlans_del(rocker_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); + if (err) + break; + err = rocker_world_port_obj_vlan_del(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj)); break; case SWITCHDEV_OBJ_ID_IPV4_FIB: fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); @@ -4283,10 +4670,18 @@ static int rocker_port_obj_del(struct net_device *dev, htonl(fib4->dst), fib4->dst_len, &fib4->fi, fib4->tb_id, ROCKER_OP_FLAG_REMOVE); + if (err) + break; + err = rocker_world_port_obj_fib4_del(rocker_port, + SWITCHDEV_OBJ_IPV4_FIB(obj)); break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = rocker_port_fdb_del(rocker_port, NULL, SWITCHDEV_OBJ_PORT_FDB(obj)); + if (err) + break; + err = rocker_world_port_obj_fdb_del(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj)); break; default: err = -EOPNOTSUPP; @@ -4358,10 +4753,20 @@ static int rocker_port_obj_dump(struct net_device *dev, case SWITCHDEV_OBJ_ID_PORT_FDB: err = rocker_port_fdb_dump(rocker_port, SWITCHDEV_OBJ_PORT_FDB(obj), cb); + if (err) + break; + err = rocker_world_port_obj_fdb_dump(rocker_port, + SWITCHDEV_OBJ_PORT_FDB(obj), + cb); break; case SWITCHDEV_OBJ_ID_PORT_VLAN: err = rocker_port_vlan_dump(rocker_port, SWITCHDEV_OBJ_PORT_VLAN(obj), cb); + if (err) + break; + err = rocker_world_port_obj_vlan_dump(rocker_port, + SWITCHDEV_OBJ_PORT_VLAN(obj), + cb); break; default: err = -EOPNOTSUPP; @@ -4687,7 +5092,7 @@ static void rocker_carrier_init(const struct rocker_port *rocker_port) netif_carrier_off(rocker_port->dev); } -static void rocker_remove_ports(const struct rocker *rocker) +static void rocker_remove_ports(struct rocker *rocker) { struct rocker_port *rocker_port; int i; @@ -4697,9 +5102,12 @@ static void rocker_remove_ports(const struct rocker *rocker) if (!rocker_port) continue; rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); + rocker_world_port_fini(rocker_port); unregister_netdev(rocker_port->dev); + rocker_world_port_post_fini(rocker_port); free_netdev(rocker_port->dev); } + rocker_world_fini(rocker); kfree(rocker->ports); } @@ -4736,6 +5144,12 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME; + err = rocker_world_check_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "world init failed\n"); + goto err_world_check_init; + } + rocker_port_dev_addr_init(rocker_port); dev->netdev_ops = &rocker_port_netdev_ops; dev->ethtool_ops = &rocker_port_ethtool_ops; @@ -4748,6 +5162,11 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG; + err = rocker_world_port_pre_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "port world pre-init failed\n"); + goto err_world_port_pre_init; + } err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "register_netdev failed\n"); @@ -4755,6 +5174,12 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int 
port_number) } rocker->ports[port_number] = rocker_port; + err = rocker_world_port_init(rocker_port); + if (err) { + dev_err(&pdev->dev, "port world init failed\n"); + goto err_world_port_init; + } + switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); rocker_port_set_learning(rocker_port, NULL); @@ -4779,9 +5204,14 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) err_untagged_vlan: rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); err_port_ig_tbl: + rocker_world_port_fini(rocker_port); +err_world_port_init: rocker->ports[port_number] = NULL; unregister_netdev(dev); err_register_netdev: + rocker_world_port_post_fini(rocker_port); +err_world_port_pre_init: +err_world_check_init: free_netdev(dev); return err; } @@ -5132,12 +5562,22 @@ static int rocker_netdevice_event(struct notifier_block *unused, goto out; rocker_port = netdev_priv(dev); if (info->linking) { + err = rocker_world_port_master_linked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master linked (err %d)\n", + err); err = rocker_port_master_linked(rocker_port, info->upper_dev); if (err) netdev_warn(dev, "failed to reflect master linked (err %d)\n", err); } else { + err = rocker_world_port_master_unlinked(rocker_port, + info->upper_dev); + if (err) + netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", + err); err = rocker_port_master_unlinked(rocker_port); if (err) netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", @@ -5170,6 +5610,7 @@ static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) static int rocker_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr) { + struct rocker_port *rocker_port; struct net_device *dev; struct neighbour *n = ptr; int err; @@ -5181,6 +5622,11 @@ static int rocker_netevent_event(struct notifier_block *unused, dev = n->dev; if (!rocker_port_dev_check(dev)) return NOTIFY_DONE; + rocker_port = netdev_priv(dev); + err = rocker_world_port_neigh_update(rocker_port, n); + if (err) + netdev_warn(dev, "failed to handle neigh update (err %d)\n", + err); err = rocker_neigh_update(dev, n); if (err) netdev_warn(dev, diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c new file mode 100644 index 000000000000..155dc534a228 --- /dev/null +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -0,0 +1,27 @@ +/* + * drivers/net/ethernet/rocker/rocker_ofdpa.c - Rocker switch OF-DPA-like + * implementation + * Copyright (c) 2014 Scott Feldman + * Copyright (c) 2014-2016 Jiri Pirko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include "rocker.h" + +struct ofdpa { +}; + +struct ofdpa_port { +}; + +struct rocker_world_ops rocker_ofdpa_ops = { + .kind = "ofdpa", + .priv_size = sizeof(struct ofdpa), + .port_priv_size = sizeof(struct ofdpa_port), + .mode = ROCKER_PORT_MODE_OF_DPA, +}; -- cgit v1.2.3 From c1fe922e15ee73b0dd9fe6a9b08bdc204a7d7428 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:46 +0100 Subject: rocker: pass "learning" value as a parameter to rocker_port_set_learning Be consistent with the rest of the setting functions, and pass "learning" as a bool function parameter. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_main.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 871ccbe107fc..0015dcbf83e4 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1583,6 +1583,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, struct rocker_desc_info *desc_info, void *priv) { + bool learning = *(bool *)priv; struct rocker_tlv *cmd_info; if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, @@ -1595,7 +1596,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, rocker_port->pport)) return -EMSGSIZE; if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING, - !!(rocker_port->brport_flags & BR_LEARNING))) + learning)) return -EMSGSIZE; rocker_tlv_nest_end(desc_info, cmd_info); return 0; @@ -1652,11 +1653,12 @@ static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, } static int rocker_port_set_learning(struct rocker_port *rocker_port, - struct switchdev_trans *trans) + struct switchdev_trans *trans, + bool learning) { return rocker_cmd_exec(rocker_port, trans, 0, rocker_cmd_set_port_learning_prep, - NULL, NULL, NULL); + &learning, NULL, NULL); } /********************** @@ -4447,7 +4449,8 @@ static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, orig_flags = rocker_port->brport_flags; rocker_port->brport_flags = brport_flags; if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING) - err = rocker_port_set_learning(rocker_port, trans); + err = rocker_port_set_learning(rocker_port, trans, + !!(rocker_port->brport_flags & BR_LEARNING)); if (switchdev_trans_ph_prepare(trans)) rocker_port->brport_flags = orig_flags; @@ -5182,7 +5185,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); - rocker_port_set_learning(rocker_port, NULL); + rocker_port_set_learning(rocker_port, NULL, + !!(rocker_port->brport_flags & BR_LEARNING)); err = rocker_port_ig_tbl(rocker_port, NULL, 0); if (err) { -- cgit v1.2.3 From ca0a5f2a39cbc3bcbbcc651e82758fa135ac1b9b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:47 +0100 Subject: rocker: pre-allocate wait structures during cmd ring init This avoids need to alloc/free wait structure for every command call. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_main.c | 87 +++++++++++++++++++++++-------- 1 file changed, 66 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 0015dcbf83e4..8585d983675c 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -338,23 +338,19 @@ static void rocker_wait_init(struct rocker_wait *wait) rocker_wait_reset(wait); } -static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags) +static struct rocker_wait *rocker_wait_create(void) { struct rocker_wait *wait; - wait = rocker_kzalloc(trans, flags, sizeof(*wait)); + wait = kzalloc(sizeof(*wait), GFP_KERNEL); if (!wait) return NULL; - rocker_wait_init(wait); return wait; } -static void rocker_wait_destroy(struct switchdev_trans *trans, - struct rocker_wait *wait) +static void rocker_wait_destroy(struct rocker_wait *wait) { - rocker_kfree(trans, wait); + kfree(wait); } static bool rocker_wait_event_timeout(struct rocker_wait *wait, @@ -831,6 +827,53 @@ static void rocker_dma_ring_bufs_free(const struct rocker *rocker, } } +static int rocker_dma_cmd_ring_wait_alloc(struct rocker_desc_info *desc_info) +{ + struct rocker_wait *wait; + + wait = rocker_wait_create(); + if (!wait) + return -ENOMEM; + rocker_desc_cookie_ptr_set(desc_info, wait); + return 0; +} + +static void +rocker_dma_cmd_ring_wait_free(const struct rocker_desc_info *desc_info) +{ + struct rocker_wait *wait = rocker_desc_cookie_ptr_get(desc_info); + + rocker_wait_destroy(wait); +} + +static int rocker_dma_cmd_ring_waits_alloc(const struct rocker *rocker) +{ + const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; + int i; + int err; + + for (i = 0; i < cmd_ring->size; i++) { + err = rocker_dma_cmd_ring_wait_alloc(&cmd_ring->desc_info[i]); + if (err) + goto rollback; + } + return 0; + +rollback: + for (i--; i >= 0; i--) + rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); + return err; +} + +static void rocker_dma_cmd_ring_waits_free(const struct rocker *rocker) +{ + const struct rocker_dma_ring_info *cmd_ring = &rocker->cmd_ring; + int i; + + for (i = 0; i < cmd_ring->size; i++) + rocker_dma_cmd_ring_wait_free(&cmd_ring->desc_info[i]); +} + static int rocker_dma_rings_init(struct rocker *rocker) { const struct pci_dev *pdev = rocker->pdev; @@ -853,6 +896,12 @@ static int rocker_dma_rings_init(struct rocker *rocker) goto err_dma_cmd_ring_bufs_alloc; } + err = rocker_dma_cmd_ring_waits_alloc(rocker); + if (err) { + dev_err(&pdev->dev, "failed to alloc command dma ring waits\n"); + goto err_dma_cmd_ring_waits_alloc; + } + err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT, ROCKER_DMA_EVENT_DEFAULT_SIZE, &rocker->event_ring); @@ -875,6 +924,8 @@ err_dma_event_ring_bufs_alloc: err_dma_event_ring_create: rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); +err_dma_cmd_ring_waits_alloc: + rocker_dma_cmd_ring_waits_free(rocker); err_dma_cmd_ring_bufs_alloc: rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); return err; @@ -885,6 +936,7 @@ static void rocker_dma_rings_fini(struct rocker *rocker) rocker_dma_ring_bufs_free(rocker, &rocker->event_ring, PCI_DMA_BIDIRECTIONAL); rocker_dma_ring_destroy(rocker, &rocker->event_ring); + rocker_dma_cmd_ring_waits_free(rocker); rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring, PCI_DMA_BIDIRECTIONAL); rocker_dma_ring_destroy(rocker, &rocker->cmd_ring); @@ -1106,7 
+1158,6 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id) wait = rocker_desc_cookie_ptr_get(desc_info); if (wait->nowait) { rocker_desc_gen_clear(desc_info); - rocker_wait_destroy(NULL, wait); } else { rocker_wait_wake_up(wait); } @@ -1298,28 +1349,24 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port, unsigned long lock_flags; int err; - wait = rocker_wait_create(rocker_port, trans, flags); - if (!wait) - return -ENOMEM; - wait->nowait = nowait; - spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags); desc_info = rocker_desc_head_get(&rocker->cmd_ring); if (!desc_info) { spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - err = -EAGAIN; - goto out; + return -EAGAIN; } + wait = rocker_desc_cookie_ptr_get(desc_info); + rocker_wait_init(wait); + wait->nowait = nowait; + err = prepare(rocker_port, desc_info, prepare_priv); if (err) { spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); - goto out; + return err; } - rocker_desc_cookie_ptr_set(desc_info, wait); - if (!switchdev_trans_ph_prepare(trans)) rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); @@ -1340,8 +1387,6 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port, err = process(rocker_port, desc_info, process_priv); rocker_desc_gen_clear(desc_info); -out: - rocker_wait_destroy(trans, wait); return err; } -- cgit v1.2.3 From ae3907ecedb91c10ab8fd8b42ef62431206eee83 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:48 +0100 Subject: rocker: remove trans parameter to rocker_cmd_exec function The only purpose of passing this parameter is to check for prepare phase. The only reason for a failure in that state is if TLVs don't fit into descriptor. That is highly unlikely and if that happens, it is a driver bug. So remove this parameter from rocker_cmd_exec, and check for prepare phase in caller. Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_main.c | 65 +++++++++++++++++-------------- 1 file changed, 35 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 8585d983675c..8113b1596075 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1337,8 +1337,7 @@ typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv); -static int rocker_cmd_exec(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, +static int rocker_cmd_exec(struct rocker_port *rocker_port, int flags, rocker_cmd_prep_cb_t prepare, void *prepare_priv, rocker_cmd_proc_cb_t process, void *process_priv) { @@ -1367,17 +1366,15 @@ static int rocker_cmd_exec(struct rocker_port *rocker_port, return err; } - if (!switchdev_trans_ph_prepare(trans)) - rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); + rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info); spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags); if (nowait) return 0; - if (!switchdev_trans_ph_prepare(trans)) - if (!rocker_wait_event_timeout(wait, HZ / 10)) - return -EIO; + if (!rocker_wait_event_timeout(wait, HZ / 10)) + return -EIO; err = rocker_desc_err(desc_info); if (err) @@ -1650,7 +1647,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_ethtool_proc, ecmd); @@ -1659,7 +1656,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_macaddr_proc, macaddr); @@ -1668,7 +1665,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, u8 *p_mode) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_mode_proc, p_mode); } @@ -1676,7 +1673,7 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_set_port_settings_ethtool_prep, ecmd, NULL, NULL); } @@ -1684,7 +1681,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_set_port_settings_macaddr_prep, macaddr, NULL, NULL); } @@ -1692,16 +1689,15 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, int mtu) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, 
rocker_cmd_set_port_settings_mtu_prep, &mtu, NULL, NULL); } static int rocker_port_set_learning(struct rocker_port *rocker_port, - struct switchdev_trans *trans, bool learning) { - return rocker_cmd_exec(rocker_port, trans, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_set_port_learning_prep, &learning, NULL, NULL); } @@ -2578,8 +2574,11 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_add, found, NULL, NULL); + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(rocker_port, flags, + rocker_cmd_flow_tbl_add, + found, NULL, NULL); + return 0; } static int rocker_flow_tbl_del(struct rocker_port *rocker_port, @@ -2609,9 +2608,10 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, rocker_kfree(trans, match); if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_flow_tbl_del, - found, NULL, NULL); + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(rocker_port, flags, + rocker_cmd_flow_tbl_del, + found, NULL, NULL); rocker_kfree(trans, found); } @@ -2899,8 +2899,11 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - return rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_add, found, NULL, NULL); + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(rocker_port, flags, + rocker_cmd_group_tbl_add, + found, NULL, NULL); + return 0; } static int rocker_group_tbl_del(struct rocker_port *rocker_port, @@ -2927,9 +2930,10 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port, rocker_group_tbl_entry_free(trans, match); if (found) { - err = rocker_cmd_exec(rocker_port, trans, flags, - rocker_cmd_group_tbl_del, - found, NULL, NULL); + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(rocker_port, flags, + rocker_cmd_group_tbl_del, + found, NULL, NULL); rocker_group_tbl_entry_free(trans, found); } @@ -4405,7 +4409,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, struct port_name name = { .buf = buf, .len = len }; int err; - err = rocker_cmd_exec(rocker_port, NULL, 0, + err = rocker_cmd_exec(rocker_port, 0, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_phys_name_proc, &name); @@ -4493,8 +4497,9 @@ static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, orig_flags = rocker_port->brport_flags; rocker_port->brport_flags = brport_flags; - if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING) - err = rocker_port_set_learning(rocker_port, trans, + if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING && + !switchdev_trans_ph_prepare(trans)) + err = rocker_port_set_learning(rocker_port, !!(rocker_port->brport_flags & BR_LEARNING)); if (switchdev_trans_ph_prepare(trans)) @@ -4957,7 +4962,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, void *priv) { - return rocker_cmd_exec(rocker_port, NULL, 0, + return rocker_cmd_exec(rocker_port, 0, rocker_cmd_get_port_stats_prep, NULL, rocker_cmd_get_port_stats_ethtool_proc, priv); @@ -5230,7 +5235,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); - rocker_port_set_learning(rocker_port, NULL, + rocker_port_set_learning(rocker_port, 
!!(rocker_port->brport_flags & BR_LEARNING)); err = rocker_port_ig_tbl(rocker_port, NULL, 0); -- cgit v1.2.3 From 53901cc03a361f57127ed1ae967c72ee9e137454 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:49 +0100 Subject: rocker: call rocker_cmd_exec function with "nowait" boolean instead of flags No need to push down rocker flags just to check if this is nowait or not. Let the caller handle that. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_main.c | 38 +++++++++++++++++++------------ 1 file changed, 23 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 8113b1596075..02602491dba3 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -262,6 +262,11 @@ static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) #define ROCKER_OP_FLAG_LEARNED BIT(2) #define ROCKER_OP_FLAG_REFRESH BIT(3) +static bool rocker_flags_nowait(int flags) +{ + return flags & ROCKER_OP_FLAG_NOWAIT; +} + static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags, size_t size) { @@ -1337,14 +1342,13 @@ typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, const struct rocker_desc_info *desc_info, void *priv); -static int rocker_cmd_exec(struct rocker_port *rocker_port, int flags, +static int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, rocker_cmd_prep_cb_t prepare, void *prepare_priv, rocker_cmd_proc_cb_t process, void *process_priv) { struct rocker *rocker = rocker_port->rocker; struct rocker_desc_info *desc_info; struct rocker_wait *wait; - bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT); unsigned long lock_flags; int err; @@ -1647,7 +1651,7 @@ rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_ethtool_proc, ecmd); @@ -1656,7 +1660,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_macaddr_proc, macaddr); @@ -1665,7 +1669,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, u8 *p_mode) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_mode_proc, p_mode); } @@ -1673,7 +1677,7 @@ static int rocker_cmd_get_port_settings_mode(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, struct ethtool_cmd *ecmd) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_set_port_settings_ethtool_prep, ecmd, NULL, NULL); } @@ -1681,7 +1685,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, unsigned char *macaddr) { - return 
rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_set_port_settings_macaddr_prep, macaddr, NULL, NULL); } @@ -1689,7 +1693,7 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port, static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, int mtu) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_set_port_settings_mtu_prep, &mtu, NULL, NULL); } @@ -1697,7 +1701,7 @@ static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, static int rocker_port_set_learning(struct rocker_port *rocker_port, bool learning) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_set_port_learning_prep, &learning, NULL, NULL); } @@ -2575,7 +2579,8 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); if (!switchdev_trans_ph_prepare(trans)) - return rocker_cmd_exec(rocker_port, flags, + return rocker_cmd_exec(rocker_port, + rocker_flags_nowait(flags), rocker_cmd_flow_tbl_add, found, NULL, NULL); return 0; @@ -2609,7 +2614,8 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port, if (found) { if (!switchdev_trans_ph_prepare(trans)) - err = rocker_cmd_exec(rocker_port, flags, + err = rocker_cmd_exec(rocker_port, + rocker_flags_nowait(flags), rocker_cmd_flow_tbl_del, found, NULL, NULL); rocker_kfree(trans, found); @@ -2900,7 +2906,8 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port, spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); if (!switchdev_trans_ph_prepare(trans)) - return rocker_cmd_exec(rocker_port, flags, + return rocker_cmd_exec(rocker_port, + rocker_flags_nowait(flags), rocker_cmd_group_tbl_add, found, NULL, NULL); return 0; @@ -2931,7 +2938,8 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port, if (found) { if (!switchdev_trans_ph_prepare(trans)) - err = rocker_cmd_exec(rocker_port, flags, + err = rocker_cmd_exec(rocker_port, + rocker_flags_nowait(flags), rocker_cmd_group_tbl_del, found, NULL, NULL); rocker_group_tbl_entry_free(trans, found); @@ -4409,7 +4417,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev, struct port_name name = { .buf = buf, .len = len }; int err; - err = rocker_cmd_exec(rocker_port, 0, + err = rocker_cmd_exec(rocker_port, false, rocker_cmd_get_port_settings_prep, NULL, rocker_cmd_get_port_settings_phys_name_proc, &name); @@ -4962,7 +4970,7 @@ rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port, static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port, void *priv) { - return rocker_cmd_exec(rocker_port, 0, + return rocker_cmd_exec(rocker_port, false, rocker_cmd_get_port_stats_prep, NULL, rocker_cmd_get_port_stats_ethtool_proc, priv); -- cgit v1.2.3 From 3fbcdbf3f1cbe86fc11856ec268470d7d7bd459b Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:50 +0100 Subject: rocker: move OF-DPA stuff into separate file Carve out OF-DPA would specific code from the common file to the world file. This change required struct rocker and struct rocker_port split into world specific struct ofdpa and struct ofdpa_port. Along with this the world specific functions and defines were renamed from prefix "rocker_" to "ofdpa_". Signed-off-by: Jiri Pirko Signed-off-by: David S. 
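[Editor's note: an illustrative sketch of the "world ops" split that this patch completes, with simplified stand-in structures rather than the driver's exact layout. The core keeps only generic per-port state and dispatches through a per-world ops table; each world (here OF-DPA) provides its own private structs, sized via port_priv_size, and its own callbacks.]

#include <stdio.h>
#include <stdlib.h>

struct port;

struct world_ops {
	const char *kind;
	size_t port_priv_size;			/* per-port private data */
	int (*port_open)(struct port *port);
};

struct port {
	const struct world_ops *wops;
	void *wpriv;				/* world-private per-port data */
};

/* World-specific code, analogous to rocker_ofdpa.c. */
struct ofdpa_port {
	int stp_state;
};

static int ofdpa_port_open(struct port *port)
{
	struct ofdpa_port *ofdpa_port = port->wpriv;

	ofdpa_port->stp_state = 0;
	printf("ofdpa port opened\n");
	return 0;
}

static const struct world_ops ofdpa_ops = {
	.kind		= "ofdpa",
	.port_priv_size	= sizeof(struct ofdpa_port),
	.port_open	= ofdpa_port_open,
};

/* Core code, analogous to rocker_main.c: no OF-DPA knowledge here. */
static int world_port_init(struct port *port, const struct world_ops *wops)
{
	port->wops = wops;
	port->wpriv = calloc(1, wops->port_priv_size);
	return port->wpriv ? 0 : -1;
}

int main(void)
{
	struct port p;

	if (world_port_init(&p, &ofdpa_ops))
		return 1;
	return p.wops->port_open(&p);
}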
Miller --- drivers/net/ethernet/rocker/rocker.h | 53 +- drivers/net/ethernet/rocker/rocker_main.c | 3073 ++-------------------------- drivers/net/ethernet/rocker/rocker_ofdpa.c | 2925 ++++++++++++++++++++++++++ 3 files changed, 3060 insertions(+), 2991 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 5fd0e3520fd6..1ab995f7146b 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -14,8 +14,6 @@ #include #include -#include -#include #include #include #include @@ -42,34 +40,12 @@ struct rocker_dma_ring_info { struct rocker; -enum { - ROCKER_CTRL_LINK_LOCAL_MCAST, - ROCKER_CTRL_LOCAL_ARP, - ROCKER_CTRL_IPV4_MCAST, - ROCKER_CTRL_IPV6_MCAST, - ROCKER_CTRL_DFLT_BRIDGING, - ROCKER_CTRL_DFLT_OVS, - ROCKER_CTRL_MAX, -}; - -#define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00 -#define ROCKER_N_INTERNAL_VLANS 255 -#define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) -#define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS) - struct rocker_port { struct net_device *dev; - struct net_device *bridge_dev; struct rocker *rocker; void *wpriv; unsigned int port_number; u32 pport; - __be16 internal_vlan_id; - int stp_state; - u32 brport_flags; - unsigned long ageing_time; - bool ctrls[ROCKER_CTRL_MAX]; - unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN]; struct napi_struct napi_tx; struct napi_struct napi_rx; struct rocker_dma_ring_info tx_ring; @@ -92,22 +68,23 @@ struct rocker { struct rocker_dma_ring_info event_ring; struct rocker_world_ops *wops; void *wpriv; - DECLARE_HASHTABLE(flow_tbl, 16); - spinlock_t flow_tbl_lock; /* for flow tbl accesses */ - u64 flow_tbl_next_cookie; - DECLARE_HASHTABLE(group_tbl, 16); - spinlock_t group_tbl_lock; /* for group tbl accesses */ - struct timer_list fdb_cleanup_timer; - DECLARE_HASHTABLE(fdb_tbl, 16); - spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ - unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN]; - DECLARE_HASHTABLE(internal_vlan_tbl, 8); - spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ - DECLARE_HASHTABLE(neigh_tbl, 16); - spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ - u32 neigh_tbl_next_index; }; +typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv); + +typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info, + void *priv); + +int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv); + +int rocker_port_set_learning(struct rocker_port *rocker_port, + bool learning); + struct rocker_world_ops { const char *kind; size_t priv_size; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 02602491dba3..1efd8b79b029 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -16,12 +16,9 @@ #include #include #include -#include -#include #include #include #include -#include #include #include #include @@ -33,7 +30,6 @@ #include #include #include -#include #include #include #include @@ -50,281 +46,6 @@ static const struct pci_device_id rocker_pci_id_table[] = { {0, } }; -struct rocker_flow_tbl_key { - u32 priority; - enum rocker_of_dpa_table_id tbl_id; - union { - struct { - u32 in_pport; - u32 in_pport_mask; - enum rocker_of_dpa_table_id 
goto_tbl; - } ig_port; - struct { - u32 in_pport; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool untagged; - __be16 new_vlan_id; - } vlan; - struct { - u32 in_pport; - u32 in_pport_mask; - __be16 eth_type; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 vlan_id; - __be16 vlan_id_mask; - enum rocker_of_dpa_table_id goto_tbl; - bool copy_to_cpu; - } term_mac; - struct { - __be16 eth_type; - __be32 dst4; - __be32 dst4_mask; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - } ucast_routing; - struct { - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - int has_eth_dst; - int has_eth_dst_mask; - __be16 vlan_id; - u32 tunnel_id; - enum rocker_of_dpa_table_id goto_tbl; - u32 group_id; - bool copy_to_cpu; - } bridge; - struct { - u32 in_pport; - u32 in_pport_mask; - u8 eth_src[ETH_ALEN]; - u8 eth_src_mask[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - u8 eth_dst_mask[ETH_ALEN]; - __be16 eth_type; - __be16 vlan_id; - __be16 vlan_id_mask; - u8 ip_proto; - u8 ip_proto_mask; - u8 ip_tos; - u8 ip_tos_mask; - u32 group_id; - } acl; - }; -}; - -struct rocker_flow_tbl_entry { - struct hlist_node entry; - u32 cmd; - u64 cookie; - struct rocker_flow_tbl_key key; - size_t key_len; - u32 key_crc32; /* key */ -}; - -struct rocker_group_tbl_entry { - struct hlist_node entry; - u32 cmd; - u32 group_id; /* key */ - u16 group_count; - u32 *group_ids; - union { - struct { - u8 pop_vlan; - } l2_interface; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - u32 group_id; - } l2_rewrite; - struct { - u8 eth_src[ETH_ALEN]; - u8 eth_dst[ETH_ALEN]; - __be16 vlan_id; - bool ttl_check; - u32 group_id; - } l3_unicast; - }; -}; - -struct rocker_fdb_tbl_entry { - struct hlist_node entry; - u32 key_crc32; /* key */ - bool learned; - unsigned long touched; - struct rocker_fdb_tbl_key { - struct rocker_port *rocker_port; - u8 addr[ETH_ALEN]; - __be16 vlan_id; - } key; -}; - -struct rocker_internal_vlan_tbl_entry { - struct hlist_node entry; - int ifindex; /* key */ - u32 ref_count; - __be16 vlan_id; -}; - -struct rocker_neigh_tbl_entry { - struct hlist_node entry; - __be32 ip_addr; /* key */ - struct net_device *dev; - u32 ref_count; - u32 index; - u8 eth_dst[ETH_ALEN]; - bool ttl_check; -}; - -static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; -static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; -static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; -static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; -static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; -static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; -static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; - -/* Rocker priority levels for flow table entries. Higher - * priority match takes precedence over lower priority match. 
- */ - -enum { - ROCKER_PRIORITY_UNKNOWN = 0, - ROCKER_PRIORITY_IG_PORT = 1, - ROCKER_PRIORITY_VLAN = 1, - ROCKER_PRIORITY_TERM_MAC_UCAST = 0, - ROCKER_PRIORITY_TERM_MAC_MCAST = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_VLAN = 3, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, - ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, - ROCKER_PRIORITY_BRIDGING_TENANT = 3, - ROCKER_PRIORITY_ACL_CTRL = 3, - ROCKER_PRIORITY_ACL_NORMAL = 2, - ROCKER_PRIORITY_ACL_DFLT = 1, -}; - -static bool rocker_vlan_id_is_internal(__be16 vlan_id) -{ - u16 start = ROCKER_INTERNAL_VLAN_ID_BASE; - u16 end = 0xffe; - u16 _vlan_id = ntohs(vlan_id); - - return (_vlan_id >= start && _vlan_id <= end); -} - -static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port, - u16 vid, bool *pop_vlan) -{ - __be16 vlan_id; - - if (pop_vlan) - *pop_vlan = false; - vlan_id = htons(vid); - if (!vlan_id) { - vlan_id = rocker_port->internal_vlan_id; - if (pop_vlan) - *pop_vlan = true; - } - - return vlan_id; -} - -static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port, - __be16 vlan_id) -{ - if (rocker_vlan_id_is_internal(vlan_id)) - return 0; - - return ntohs(vlan_id); -} - -static bool rocker_port_is_bridged(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_bridge_master(rocker_port->bridge_dev); -} - -static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port) -{ - return rocker_port->bridge_dev && - netif_is_ovs_master(rocker_port->bridge_dev); -} - -#define ROCKER_OP_FLAG_REMOVE BIT(0) -#define ROCKER_OP_FLAG_NOWAIT BIT(1) -#define ROCKER_OP_FLAG_LEARNED BIT(2) -#define ROCKER_OP_FLAG_REFRESH BIT(3) - -static bool rocker_flags_nowait(int flags) -{ - return flags & ROCKER_OP_FLAG_NOWAIT; -} - -static void *__rocker_mem_alloc(struct switchdev_trans *trans, int flags, - size_t size) -{ - struct switchdev_trans_item *elem = NULL; - gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ? - GFP_ATOMIC : GFP_KERNEL; - - /* If in transaction prepare phase, allocate the memory - * and enqueue it on a transaction. If in transaction - * commit phase, dequeue the memory from the transaction - * rather than re-allocating the memory. The idea is the - * driver code paths for prepare and commit are identical - * so the memory allocated in the prepare phase is the - * memory used in the commit phase. - */ - - if (!trans) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - } else if (switchdev_trans_ph_prepare(trans)) { - elem = kzalloc(size + sizeof(*elem), gfp_flags); - if (!elem) - return NULL; - switchdev_trans_item_enqueue(trans, elem, kfree, elem); - } else { - elem = switchdev_trans_item_dequeue(trans); - } - - return elem ? elem + 1 : NULL; -} - -static void *rocker_kzalloc(struct switchdev_trans *trans, int flags, - size_t size) -{ - return __rocker_mem_alloc(trans, flags, size); -} - -static void *rocker_kcalloc(struct switchdev_trans *trans, int flags, - size_t n, size_t size) -{ - return __rocker_mem_alloc(trans, flags, n * size); -} - -static void rocker_kfree(struct switchdev_trans *trans, const void *mem) -{ - struct switchdev_trans_item *elem; - - /* Frees are ignored if in transaction prepare phase. The - * memory remains on the per-port list until freed in the - * commit phase. 
- */ - - if (switchdev_trans_ph_prepare(trans)) - return; - - elem = (struct switchdev_trans_item *) mem - 1; - kfree(elem); -} - struct rocker_wait { wait_queue_head_t wait; bool done; @@ -1216,10 +937,6 @@ static int rocker_event_link_change(const struct rocker *rocker, return 0; } -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags); static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, const unsigned char *addr, __be16 vlan_id); @@ -1231,9 +948,7 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker, unsigned int port_number; struct rocker_port *rocker_port; const unsigned char *addr; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED; __be16 vlan_id; - int err; rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info); if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] || @@ -1249,16 +964,7 @@ static int rocker_event_mac_vlan_seen(const struct rocker *rocker, return -EINVAL; rocker_port = rocker->ports[port_number]; - - err = rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); - if (err) - return err; - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - return 0; - - return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags); + return rocker_world_port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); } static int rocker_event_process(const struct rocker *rocker, @@ -1334,17 +1040,9 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id) * Command interface ********************/ -typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv); - -typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info, - void *priv); - -static int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, - rocker_cmd_prep_cb_t prepare, void *prepare_priv, - rocker_cmd_proc_cb_t process, void *process_priv) +int rocker_cmd_exec(struct rocker_port *rocker_port, bool nowait, + rocker_cmd_prep_cb_t prepare, void *prepare_priv, + rocker_cmd_proc_cb_t process, void *process_priv) { struct rocker *rocker = rocker_port->rocker; struct rocker_desc_info *desc_info; @@ -1698,8 +1396,8 @@ static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port, &mtu, NULL, NULL); } -static int rocker_port_set_learning(struct rocker_port *rocker_port, - bool learning) +int rocker_port_set_learning(struct rocker_port *rocker_port, + bool learning) { return rocker_cmd_exec(rocker_port, false, rocker_cmd_set_port_learning_prep, @@ -2035,2283 +1733,145 @@ static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); } -static int -rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) +/***************** + * Net device ops + *****************/ + +static int rocker_port_open(struct net_device *dev) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.ig_port.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.ig_port.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ig_port.goto_tbl)) - return -EMSGSIZE; + struct rocker_port *rocker_port = netdev_priv(dev); + int err; - return 
0; -} + err = rocker_port_dma_rings_init(rocker_port); + if (err) + return err; -static int -rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.vlan.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.vlan.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.vlan.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.vlan.goto_tbl)) - return -EMSGSIZE; - if (entry->key.vlan.untagged && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, - entry->key.vlan.new_vlan_id)) - return -EMSGSIZE; + err = request_irq(rocker_msix_tx_vector(rocker_port), + rocker_tx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign tx irq\n"); + goto err_request_tx_irq; + } - return 0; -} + err = request_irq(rocker_msix_rx_vector(rocker_port), + rocker_rx_irq_handler, 0, + rocker_driver_name, rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot assign rx irq\n"); + goto err_request_rx_irq; + } -static int -rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.term_mac.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.term_mac.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.term_mac.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.term_mac.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.term_mac.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.term_mac.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.term_mac.vlan_id_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.term_mac.goto_tbl)) - return -EMSGSIZE; - if (entry->key.term_mac.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.term_mac.copy_to_cpu)) - return -EMSGSIZE; + err = rocker_world_port_open(rocker_port); + if (err) { + netdev_err(rocker_port->dev, "cannot open port in world\n"); + goto err_world_port_open; + } + napi_enable(&rocker_port->napi_tx); + napi_enable(&rocker_port->napi_rx); + if (!dev->proto_down) + rocker_port_set_enable(rocker_port, true); + netif_start_queue(dev); return 0; + +err_world_port_open: + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); +err_request_rx_irq: + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); +err_request_tx_irq: + rocker_port_dma_rings_fini(rocker_port); + return err; } -static int -rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) +static int rocker_port_stop(struct net_device *dev) { - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.ucast_routing.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, - entry->key.ucast_routing.dst4)) - return 
-EMSGSIZE; - if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, - entry->key.ucast_routing.dst4_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.ucast_routing.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.ucast_routing.group_id)) - return -EMSGSIZE; + struct rocker_port *rocker_port = netdev_priv(dev); + + netif_stop_queue(dev); + rocker_port_set_enable(rocker_port, false); + napi_disable(&rocker_port->napi_rx); + napi_disable(&rocker_port->napi_tx); + rocker_world_port_stop(rocker_port); + free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); + free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); + rocker_port_dma_rings_fini(rocker_port); return 0; } -static int -rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) +static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, + const struct rocker_desc_info *desc_info) { - if (entry->key.bridge.has_eth_dst && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.bridge.eth_dst)) - return -EMSGSIZE; - if (entry->key.bridge.has_eth_dst_mask && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.bridge.eth_dst_mask)) - return -EMSGSIZE; - if (entry->key.bridge.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.bridge.vlan_id)) - return -EMSGSIZE; - if (entry->key.bridge.tunnel_id && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, - entry->key.bridge.tunnel_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, - entry->key.bridge.goto_tbl)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.bridge.group_id)) - return -EMSGSIZE; - if (entry->key.bridge.copy_to_cpu && - rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, - entry->key.bridge.copy_to_cpu)) - return -EMSGSIZE; + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; + struct rocker_tlv *attr; + int rem; - return 0; + rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); + if (!attrs[ROCKER_TLV_TX_FRAGS]) + return; + rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { + const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; + dma_addr_t dma_handle; + size_t len; + + if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) + continue; + rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, + attr); + if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || + !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) + continue; + dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); + len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); + pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); + } } -static int -rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, - const struct rocker_flow_tbl_entry *entry) +static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + char *buf, size_t buf_len) { - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, - entry->key.acl.in_pport)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, - entry->key.acl.in_pport_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, 
ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->key.acl.eth_src)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_src_mask)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->key.acl.eth_dst)) - return -EMSGSIZE; - if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, - ETH_ALEN, entry->key.acl.eth_dst_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, - entry->key.acl.eth_type)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->key.acl.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, - entry->key.acl.vlan_id_mask)) - return -EMSGSIZE; + const struct rocker *rocker = rocker_port->rocker; + struct pci_dev *pdev = rocker->pdev; + dma_addr_t dma_handle; + struct rocker_tlv *frag; - switch (ntohs(entry->key.acl.eth_type)) { - case ETH_P_IP: - case ETH_P_IPV6: - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, - entry->key.acl.ip_proto)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_PROTO_MASK, - entry->key.acl.ip_proto_mask)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, - entry->key.acl.ip_tos & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_DSCP_MASK, - entry->key.acl.ip_tos_mask & 0x3f)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, - (entry->key.acl.ip_tos & 0xc0) >> 6)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, - ROCKER_TLV_OF_DPA_IP_ECN_MASK, - (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) - return -EMSGSIZE; - break; + dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { + if (net_ratelimit()) + netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); + return -EIO; } - - if (entry->key.acl.group_id != ROCKER_GROUP_NONE && - rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->key.acl.group_id)) - return -EMSGSIZE; - + frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); + if (!frag) + goto unmap_frag; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, + dma_handle)) + goto nest_cancel; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, + buf_len)) + goto nest_cancel; + rocker_tlv_nest_end(desc_info, frag); return 0; + +nest_cancel: + rocker_tlv_nest_cancel(desc_info, frag); +unmap_frag: + pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); + return -EMSGSIZE; } -static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) +static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) { - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, - entry->key.tbl_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, - entry->key.priority)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return 
-EMSGSIZE; - - switch (entry->key.tbl_id) { - case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: - err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_VLAN: - err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: - err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: - err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_BRIDGING: - err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry); - break; - case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: - err = rocker_cmd_flow_tbl_add_acl(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_flow_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, - entry->cookie)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, - struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, - ROCKER_GROUP_PORT_GET(entry->group_id))) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, - entry->l2_interface.pop_vlan)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l2_rewrite.group_id)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l2_rewrite.eth_src)) - return -EMSGSIZE; - if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l2_rewrite.eth_dst)) - return -EMSGSIZE; - if (entry->l2_rewrite.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l2_rewrite.vlan_id)) - return -EMSGSIZE; - - return 0; -} - -static int -rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - int i; - struct rocker_tlv *group_ids; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, - entry->group_count)) - return -EMSGSIZE; - - group_ids = rocker_tlv_nest_start(desc_info, - ROCKER_TLV_OF_DPA_GROUP_IDS); - if (!group_ids) - return -EMSGSIZE; - - for (i = 0; i < entry->group_count; i++) - /* Note TLV array is 1-based */ - if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) - return -EMSGSIZE; - - rocker_tlv_nest_end(desc_info, group_ids); - - return 0; -} - -static int -rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, - const struct rocker_group_tbl_entry *entry) -{ - if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, - ETH_ALEN, entry->l3_unicast.eth_src)) - return -EMSGSIZE; - if 
(!is_zero_ether_addr(entry->l3_unicast.eth_dst) && - rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, - ETH_ALEN, entry->l3_unicast.eth_dst)) - return -EMSGSIZE; - if (entry->l3_unicast.vlan_id && - rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, - entry->l3_unicast.vlan_id)) - return -EMSGSIZE; - if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, - entry->l3_unicast.ttl_check)) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, - entry->l3_unicast.group_id)) - return -EMSGSIZE; - - return 0; -} - -static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - int err = 0; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: - err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: - err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry); - break; - case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: - err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry); - break; - default: - err = -ENOTSUPP; - break; - } - - if (err) - return err; - - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - void *priv) -{ - const struct rocker_group_tbl_entry *entry = priv; - struct rocker_tlv *cmd_info; - - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) - return -EMSGSIZE; - cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); - if (!cmd_info) - return -EMSGSIZE; - if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, - entry->group_id)) - return -EMSGSIZE; - rocker_tlv_nest_end(desc_info, cmd_info); - - return 0; -} - -/*************************************************** - * Flow, group, FDB, internal VLAN and neigh tables - ***************************************************/ - -static int rocker_init_tbls(struct rocker *rocker) -{ - hash_init(rocker->flow_tbl); - spin_lock_init(&rocker->flow_tbl_lock); - - hash_init(rocker->group_tbl); - spin_lock_init(&rocker->group_tbl_lock); - - hash_init(rocker->fdb_tbl); - spin_lock_init(&rocker->fdb_tbl_lock); - - hash_init(rocker->internal_vlan_tbl); - spin_lock_init(&rocker->internal_vlan_tbl_lock); - - hash_init(rocker->neigh_tbl); - spin_lock_init(&rocker->neigh_tbl_lock); - - return 0; -} - -static void rocker_free_tbls(struct rocker *rocker) -{ - unsigned long flags; - struct rocker_flow_tbl_entry *flow_entry; - struct rocker_group_tbl_entry *group_entry; - struct rocker_fdb_tbl_entry *fdb_entry; - struct rocker_internal_vlan_tbl_entry *internal_vlan_entry; - struct rocker_neigh_tbl_entry *neigh_entry; - struct hlist_node *tmp; - int bkt; - - spin_lock_irqsave(&rocker->flow_tbl_lock, flags); - hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry) - hash_del(&flow_entry->entry); - 
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags); - - spin_lock_irqsave(&rocker->group_tbl_lock, flags); - hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry) - hash_del(&group_entry->entry); - spin_unlock_irqrestore(&rocker->group_tbl_lock, flags); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry) - hash_del(&fdb_entry->entry); - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags); - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags); - hash_for_each_safe(rocker->internal_vlan_tbl, bkt, - tmp, internal_vlan_entry, entry) - hash_del(&internal_vlan_entry->entry); - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags); - - spin_lock_irqsave(&rocker->neigh_tbl_lock, flags); - hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry) - hash_del(&neigh_entry->entry); - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags); -} - -static struct rocker_flow_tbl_entry * -rocker_flow_tbl_find(const struct rocker *rocker, - const struct rocker_flow_tbl_entry *match) -{ - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - - hash_for_each_possible(rocker->flow_tbl, found, - entry, match->key_crc32) { - if (memcmp(&found->key, &match->key, key_len) == 0) - return found; - } - - return NULL; -} - -static int rocker_flow_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? match->key_len : sizeof(found->key); - unsigned long lock_flags; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - match->cookie = found->cookie; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_kfree(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; - } else { - found = match; - found->cookie = rocker->flow_tbl_next_cookie++; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->flow_tbl, &found->entry, found->key_crc32); - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - if (!switchdev_trans_ph_prepare(trans)) - return rocker_cmd_exec(rocker_port, - rocker_flags_nowait(flags), - rocker_cmd_flow_tbl_add, - found, NULL, NULL); - return 0; -} - -static int rocker_flow_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_flow_tbl_entry *found; - size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); - unsigned long lock_flags; - int err = 0; - - match->key_crc32 = crc32(~0, &match->key, key_len); - - spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags); - - found = rocker_flow_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; - } - - spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags); - - rocker_kfree(trans, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - err = rocker_cmd_exec(rocker_port, - rocker_flags_nowait(flags), - rocker_cmd_flow_tbl_del, - found, NULL, NULL); - rocker_kfree(trans, found); - } - - return err; -} - -static int rocker_flow_tbl_do(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_flow_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_flow_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_flow_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - enum rocker_of_dpa_table_id goto_tbl) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_IG_PORT; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; - entry->key.ig_port.in_pport = in_pport; - entry->key.ig_port.in_pport_mask = in_pport_mask; - entry->key.ig_port.goto_tbl = goto_tbl; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, __be16 vlan_id, - __be16 vlan_id_mask, - enum rocker_of_dpa_table_id goto_tbl, - bool untagged, __be16 new_vlan_id) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.priority = ROCKER_PRIORITY_VLAN; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; - entry->key.vlan.in_pport = in_pport; - entry->key.vlan.vlan_id = vlan_id; - entry->key.vlan.vlan_id_mask = vlan_id_mask; - entry->key.vlan.goto_tbl = goto_tbl; - - entry->key.vlan.untagged = untagged; - entry->key.vlan.new_vlan_id = new_vlan_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 in_pport, u32 in_pport_mask, - __be16 eth_type, const u8 *eth_dst, - const u8 *eth_dst_mask, __be16 vlan_id, - __be16 vlan_id_mask, bool copy_to_cpu, - int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - if (is_multicast_ether_addr(eth_dst)) { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; - } else { - entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST; - entry->key.term_mac.goto_tbl = - ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - } - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - entry->key.term_mac.in_pport = in_pport; - entry->key.term_mac.in_pport_mask = in_pport_mask; - entry->key.term_mac.eth_type = eth_type; - ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); - ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); - entry->key.term_mac.vlan_id = 
vlan_id; - entry->key.term_mac.vlan_id_mask = vlan_id_mask; - entry->key.term_mac.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 vlan_id, u32 tunnel_id, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, bool copy_to_cpu) -{ - struct rocker_flow_tbl_entry *entry; - u32 priority; - bool vlan_bridging = !!vlan_id; - bool dflt = !eth_dst || (eth_dst && eth_dst_mask); - bool wild = false; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; - - if (eth_dst) { - entry->key.bridge.has_eth_dst = 1; - ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); - } - if (eth_dst_mask) { - entry->key.bridge.has_eth_dst_mask = 1; - ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); - if (!ether_addr_equal(eth_dst_mask, ff_mac)) - wild = true; - } - - priority = ROCKER_PRIORITY_UNKNOWN; - if (vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD; - else if (vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; - else if (vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_VLAN; - else if (!vlan_bridging && dflt && wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD; - else if (!vlan_bridging && dflt && !wild) - priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; - else if (!vlan_bridging && !dflt) - priority = ROCKER_PRIORITY_BRIDGING_TENANT; - - entry->key.priority = priority; - entry->key.bridge.vlan_id = vlan_id; - entry->key.bridge.tunnel_id = tunnel_id; - entry->key.bridge.goto_tbl = goto_tbl; - entry->key.bridge.group_id = group_id; - entry->key.bridge.copy_to_cpu = copy_to_cpu; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be16 eth_type, __be32 dst, - __be32 dst_mask, u32 priority, - enum rocker_of_dpa_table_id goto_tbl, - u32 group_id, int flags) -{ - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; - entry->key.priority = priority; - entry->key.ucast_routing.eth_type = eth_type; - entry->key.ucast_routing.dst4 = dst; - entry->key.ucast_routing.dst4_mask = dst_mask; - entry->key.ucast_routing.goto_tbl = goto_tbl; - entry->key.ucast_routing.group_id = group_id; - entry->key_len = offsetof(struct rocker_flow_tbl_key, - ucast_routing.group_id); - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_flow_tbl_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 in_pport, u32 in_pport_mask, - const u8 *eth_src, const u8 *eth_src_mask, - const u8 *eth_dst, const u8 *eth_dst_mask, - __be16 eth_type, __be16 vlan_id, - __be16 vlan_id_mask, u8 ip_proto, - u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, - u32 group_id) -{ - u32 priority; - struct rocker_flow_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - priority = ROCKER_PRIORITY_ACL_NORMAL; - if (eth_dst && eth_dst_mask) { - if (ether_addr_equal(eth_dst_mask, mcast_mac)) - priority = ROCKER_PRIORITY_ACL_DFLT; - else if (is_link_local_ether_addr(eth_dst)) 
- priority = ROCKER_PRIORITY_ACL_CTRL; - } - - entry->key.priority = priority; - entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - entry->key.acl.in_pport = in_pport; - entry->key.acl.in_pport_mask = in_pport_mask; - - if (eth_src) - ether_addr_copy(entry->key.acl.eth_src, eth_src); - if (eth_src_mask) - ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); - if (eth_dst) - ether_addr_copy(entry->key.acl.eth_dst, eth_dst); - if (eth_dst_mask) - ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); - - entry->key.acl.eth_type = eth_type; - entry->key.acl.vlan_id = vlan_id; - entry->key.acl.vlan_id_mask = vlan_id_mask; - entry->key.acl.ip_proto = ip_proto; - entry->key.acl.ip_proto_mask = ip_proto_mask; - entry->key.acl.ip_tos = ip_tos; - entry->key.acl.ip_tos_mask = ip_tos_mask; - entry->key.acl.group_id = group_id; - - return rocker_flow_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_group_tbl_entry * -rocker_group_tbl_find(const struct rocker *rocker, - const struct rocker_group_tbl_entry *match) -{ - struct rocker_group_tbl_entry *found; - - hash_for_each_possible(rocker->group_tbl, found, - entry, match->group_id) { - if (found->group_id == match->group_id) - return found; - } - - return NULL; -} - -static void rocker_group_tbl_entry_free(struct switchdev_trans *trans, - struct rocker_group_tbl_entry *entry) -{ - switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { - case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: - case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: - rocker_kfree(trans, entry->group_ids); - break; - default: - break; - } - rocker_kfree(trans, entry); -} - -static int rocker_group_tbl_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - rocker_group_tbl_entry_free(trans, found); - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; - } else { - found = match; - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; - } - - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->group_tbl, &found->entry, found->group_id); - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - if (!switchdev_trans_ph_prepare(trans)) - return rocker_cmd_exec(rocker_port, - rocker_flags_nowait(flags), - rocker_cmd_group_tbl_add, - found, NULL, NULL); - return 0; -} - -static int rocker_group_tbl_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *match) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_group_tbl_entry *found; - unsigned long lock_flags; - int err = 0; - - spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags); - - found = rocker_group_tbl_find(rocker, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; - } - - spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags); - - rocker_group_tbl_entry_free(trans, match); - - if (found) { - if (!switchdev_trans_ph_prepare(trans)) - err = rocker_cmd_exec(rocker_port, - rocker_flags_nowait(flags), - rocker_cmd_group_tbl_del, - found, NULL, NULL); - rocker_group_tbl_entry_free(trans, found); - } - - return err; -} - -static int 
rocker_group_tbl_do(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - struct rocker_group_tbl_entry *entry) -{ - if (flags & ROCKER_OP_FLAG_REMOVE) - return rocker_group_tbl_del(rocker_port, trans, flags, entry); - else - return rocker_group_tbl_add(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_interface(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u32 out_pport, - int pop_vlan) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - entry->l2_interface.pop_vlan = pop_vlan; - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_fan_out(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = group_id; - entry->group_count = group_count; - - entry->group_ids = rocker_kcalloc(trans, flags, - group_count, sizeof(u32)); - if (!entry->group_ids) { - rocker_kfree(trans, entry); - return -ENOMEM; - } - memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static int rocker_group_l2_flood(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, u8 group_count, - const u32 *group_ids, u32 group_id) -{ - return rocker_group_l2_fan_out(rocker_port, trans, flags, - group_count, group_ids, - group_id); -} - -static int rocker_group_l3_unicast(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u32 index, const u8 *src_mac, const u8 *dst_mac, - __be16 vlan_id, bool ttl_check, u32 pport) -{ - struct rocker_group_tbl_entry *entry; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - entry->group_id = ROCKER_GROUP_L3_UNICAST(index); - if (src_mac) - ether_addr_copy(entry->l3_unicast.eth_src, src_mac); - if (dst_mac) - ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); - entry->l3_unicast.vlan_id = vlan_id; - entry->l3_unicast.ttl_check = ttl_check; - entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); - - return rocker_group_tbl_do(rocker_port, trans, flags, entry); -} - -static struct rocker_neigh_tbl_entry * -rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr) -{ - struct rocker_neigh_tbl_entry *found; - - hash_for_each_possible(rocker->neigh_tbl, found, - entry, be32_to_cpu(ip_addr)) - if (found->ip_addr == ip_addr) - return found; - - return NULL; -} - -static void _rocker_neigh_add(struct rocker *rocker, - struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (!switchdev_trans_ph_commit(trans)) - entry->index = rocker->neigh_tbl_next_index++; - if (switchdev_trans_ph_prepare(trans)) - return; - entry->ref_count++; - hash_add(rocker->neigh_tbl, &entry->entry, - be32_to_cpu(entry->ip_addr)); -} - -static void _rocker_neigh_del(struct switchdev_trans *trans, - struct rocker_neigh_tbl_entry *entry) -{ - if (switchdev_trans_ph_prepare(trans)) - return; - if (--entry->ref_count == 0) { - hash_del(&entry->entry); - rocker_kfree(trans, entry); - } -} - -static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry, - struct switchdev_trans 
*trans, - const u8 *eth_dst, bool ttl_check) -{ - if (eth_dst) { - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = ttl_check; - } else if (!switchdev_trans_ph_prepare(trans)) { - entry->ref_count++; - } -} - -static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be32 ip_addr, const u8 *eth_dst) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - __be16 eth_type = htons(ETH_P_IP); - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - u32 priority = 0; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - int err = 0; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - ether_addr_copy(entry->eth_dst, eth_dst); - entry->ttl_check = true; - _rocker_neigh_add(rocker, trans, entry); - } else if (removing) { - memcpy(entry, found, sizeof(*entry)); - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, eth_dst, true); - memcpy(entry, found, sizeof(*entry)); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (err) - goto err_out; - - /* For each active neighbor, we have an L3 unicast group and - * a /32 route to the neighbor, which uses the L3 unicast - * group. The L3 unicast group can also be referred to by - * other routes' nexthops. - */ - - err = rocker_group_l3_unicast(rocker_port, trans, flags, - entry->index, - rocker_port->dev->dev_addr, - entry->eth_dst, - rocker_port->internal_vlan_id, - entry->ttl_check, - rocker_port->pport); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) L3 unicast group index %d\n", - err, entry->index); - goto err_out; - } - - if (adding || removing) { - group_id = ROCKER_GROUP_L3_UNICAST(entry->index); - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, - eth_type, ip_addr, - inet_make_mask(32), - priority, goto_tbl, - group_id, flags); - - if (err) - netdev_err(rocker_port->dev, - "Error (%d) /32 unicast route %pI4 group 0x%08x\n", - err, &entry->ip_addr, group_id); - } - -err_out: - if (!adding) - rocker_kfree(trans, entry); - - return err; -} - -static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - __be32 ip_addr) -{ - struct net_device *dev = rocker_port->dev; - struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); - int err = 0; - - if (!n) { - n = neigh_create(&arp_tbl, &ip_addr, dev); - if (IS_ERR(n)) - return IS_ERR(n); - } - - /* If the neigh is already resolved, then go ahead and - * install the entry, otherwise start the ARP process to - * resolve the neigh. 
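/*
 * Illustrative sketch, not part of the patch: rocker_port_ipv4_neigh() above
 * folds the "does an entry already exist" test and the requested operation
 * into three booleans (adding/updating/removing). The assumed, self-contained
 * helper below restates that decision table: an ADD with no existing entry
 * creates one, an ADD with an existing entry is an update, and a REMOVE only
 * makes sense when the entry exists.
 */
#include <stdbool.h>

enum toy_neigh_op {
        TOY_NEIGH_NONE,         /* delete of unknown entry: the driver returns -ENOENT */
        TOY_NEIGH_CREATE,       /* new neighbour: allocate, fill MAC, hash_add */
        TOY_NEIGH_UPDATE,       /* refresh MAC / bump reference count */
        TOY_NEIGH_DELETE,       /* drop reference, free on last put */
};

static enum toy_neigh_op toy_neigh_classify(bool found, bool add_requested)
{
        if (!found && add_requested)
                return TOY_NEIGH_CREATE;
        if (found && add_requested)
                return TOY_NEIGH_UPDATE;
        if (found && !add_requested)
                return TOY_NEIGH_DELETE;
        return TOY_NEIGH_NONE;
}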
- */ - - if (n->nud_state & NUD_VALID) - err = rocker_port_ipv4_neigh(rocker_port, trans, 0, - ip_addr, n->ha); - else - neigh_event_send(n, NULL); - - neigh_release(n); - return err; -} - -static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be32 ip_addr, u32 *index) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_neigh_tbl_entry *entry; - struct rocker_neigh_tbl_entry *found; - unsigned long lock_flags; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - bool updating; - bool removing; - bool resolved = true; - int err = 0; - - entry = rocker_kzalloc(trans, flags, sizeof(*entry)); - if (!entry) - return -ENOMEM; - - spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags); - - found = rocker_neigh_tbl_find(rocker, ip_addr); - if (found) - *index = found->index; - - updating = found && adding; - removing = found && !adding; - adding = !found && adding; - - if (adding) { - entry->ip_addr = ip_addr; - entry->dev = rocker_port->dev; - _rocker_neigh_add(rocker, trans, entry); - *index = entry->index; - resolved = false; - } else if (removing) { - _rocker_neigh_del(trans, found); - } else if (updating) { - _rocker_neigh_update(found, trans, NULL, false); - resolved = !is_zero_ether_addr(found->eth_dst); - } else { - err = -ENOENT; - } - - spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags); - - if (!adding) - rocker_kfree(trans, entry); - - if (err) - return err; - - /* Resolved means neigh ip_addr is resolved to neigh mac. */ - - if (!resolved) - err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr); - - return err; -} - -static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, __be16 vlan_id) -{ - struct rocker_port *p; - const struct rocker *rocker = rocker_port->rocker; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 *group_ids; - u8 group_count = 0; - int err = 0; - int i; - - group_ids = rocker_kcalloc(trans, flags, - rocker->port_count, sizeof(u32)); - if (!group_ids) - return -ENOMEM; - - /* Adjust the flood group for this VLAN. The flood group - * references an L2 interface group for each port in this - * VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (!p) - continue; - if (!rocker_port_is_bridged(p)) - continue; - if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { - group_ids[group_count++] = - ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); - } - } - - /* If there are no bridged ports in this VLAN, we're done */ - if (group_count == 0) - goto no_ports_in_vlan; - - err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id, - group_count, group_ids, group_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - -no_ports_in_vlan: - rocker_kfree(trans, group_ids); - return err; -} - -static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id, bool pop_vlan) -{ - const struct rocker *rocker = rocker_port->rocker; - struct rocker_port *p; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - u32 out_pport; - int ref = 0; - int err; - int i; - - /* An L2 interface group for this port in this VLAN, but - * only when port STP state is LEARNING|FORWARDING. 
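/*
 * Illustrative sketch, not part of the patch: the flood-group update above
 * walks every port of the switch and collects one L2 interface group per
 * bridged port that is a member of the VLAN. The assumed stand-alone helper
 * below shows just that membership filter; port_is_bridged[] and
 * port_in_vlan[] stand in for rocker_port_is_bridged() and the per-port
 * vlan_bitmap, and the L2-interface group encoding is reduced to the bare
 * pport number.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static size_t toy_collect_flood_members(const bool *port_is_bridged,
                                        const bool *port_in_vlan,
                                        size_t port_count,
                                        uint32_t *member_pports)
{
        size_t n = 0;
        size_t i;

        for (i = 0; i < port_count; i++) {
                if (!port_is_bridged[i])        /* unbridged ports never flood */
                        continue;
                if (!port_in_vlan[i])           /* port is not in this VLAN */
                        continue;
                member_pports[n++] = (uint32_t)(i + 1); /* pport numbering is 1-based */
        }
        return n;       /* 0 means no bridged ports in the VLAN: nothing to program */
}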
- */ - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) { - out_pport = rocker_port->pport; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - /* An L2 interface group for this VLAN to CPU port. - * Add when first port joins this VLAN and destroy when - * last port leaves this VLAN. - */ - - for (i = 0; i < rocker->port_count; i++) { - p = rocker->ports[i]; - if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) - ref++; - } - - if ((!adding || ref != 1) && (adding || ref != 0)) - return 0; - - out_pport = 0; - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for CPU port\n", err); - return err; - } - - return 0; -} - -static struct rocker_ctrl { - const u8 *eth_dst; - const u8 *eth_dst_mask; - __be16 eth_type; - bool acl; - bool bridge; - bool term; - bool copy_to_cpu; -} rocker_ctrls[] = { - [ROCKER_CTRL_LINK_LOCAL_MCAST] = { - /* pass link local multicast pkts up to CPU for filtering */ - .eth_dst = ll_mac, - .eth_dst_mask = ll_mask, - .acl = true, - }, - [ROCKER_CTRL_LOCAL_ARP] = { - /* pass local ARP pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .eth_type = htons(ETH_P_ARP), - .acl = true, - }, - [ROCKER_CTRL_IPV4_MCAST] = { - /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ - .eth_dst = ipv4_mcast, - .eth_dst_mask = ipv4_mask, - .eth_type = htons(ETH_P_IP), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_IPV6_MCAST] = { - /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ - .eth_dst = ipv6_mcast, - .eth_dst_mask = ipv6_mask, - .eth_type = htons(ETH_P_IPV6), - .term = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_BRIDGING] = { - /* flood any pkts on vlan */ - .bridge = true, - .copy_to_cpu = true, - }, - [ROCKER_CTRL_DFLT_OVS] = { - /* pass all pkts up to CPU */ - .eth_dst = zero_mac, - .eth_dst_mask = zero_mac, - .acl = true, - }, -}; - -static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport = rocker_port->pport; - u32 in_pport_mask = 0xffffffff; - u32 out_pport = 0; - const u8 *eth_src = NULL; - const u8 *eth_src_mask = NULL; - __be16 vlan_id_mask = htons(0xffff); - u8 ip_proto = 0; - u8 ip_proto_mask = 0; - u8 ip_tos = 0; - u8 ip_tos_mask = 0; - u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - int err; - - err = rocker_flow_tbl_acl(rocker_port, trans, flags, - in_pport, in_pport_mask, - eth_src, eth_src_mask, - ctrl->eth_dst, ctrl->eth_dst_mask, - ctrl->eth_type, - vlan_id, vlan_id_mask, - ip_proto, ip_proto_mask, - ip_tos, ip_tos_mask, - group_id); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - int flags, - const struct rocker_ctrl *ctrl, - __be16 vlan_id) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); - u32 tunnel_id = 0; - int err; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, - ctrl->eth_dst, ctrl->eth_dst_mask, - vlan_id, tunnel_id, - goto_tbl, 
group_id, ctrl->copy_to_cpu); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 vlan_id_mask = htons(0xffff); - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - ctrl->eth_type, ctrl->eth_dst, - ctrl->eth_dst_mask, vlan_id, - vlan_id_mask, ctrl->copy_to_cpu, - flags); - - if (err) - netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err); - - return err; -} - -static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl, __be16 vlan_id) -{ - if (ctrl->acl) - return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags, - ctrl, vlan_id); - if (ctrl->bridge) - return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags, - ctrl, vlan_id); - - if (ctrl->term) - return rocker_port_ctrl_vlan_term(rocker_port, trans, flags, - ctrl, vlan_id); - - return -EOPNOTSUPP; -} - -static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - int err = 0; - int i; - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (rocker_port->ctrls[i]) { - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - &rocker_ctrls[i], vlan_id); - if (err) - return err; - } - } - - return err; -} - -static int rocker_port_ctrl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const struct rocker_ctrl *ctrl) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - err = rocker_port_ctrl_vlan(rocker_port, trans, flags, - ctrl, htons(vid)); - if (err) - break; - } - - return err; -} - -static int rocker_port_vlan(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, u16 vid) -{ - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; - u32 in_pport = rocker_port->pport; - __be16 vlan_id = htons(vid); - __be16 vlan_id_mask = htons(0xffff); - __be16 internal_vlan_id; - bool untagged; - bool adding = !(flags & ROCKER_OP_FLAG_REMOVE); - int err; - - internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged); - - if (adding && test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already added */ - else if (!adding && !test_bit(ntohs(internal_vlan_id), - rocker_port->vlan_bitmap)) - return 0; /* already removed */ - - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - if (adding) { - err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port ctrl vlan add\n", err); - goto err_out; - } - } - - err = rocker_port_vlan_l2_groups(rocker_port, trans, flags, - internal_vlan_id, untagged); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 groups\n", err); - goto err_out; - } - - err = rocker_port_vlan_flood_group(rocker_port, trans, flags, - internal_vlan_id); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 flood group\n", err); - goto err_out; - } - - err = rocker_flow_tbl_vlan(rocker_port, trans, flags, - in_pport, vlan_id, vlan_id_mask, - goto_tbl, untagged, 
internal_vlan_id); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) port VLAN table\n", err); - -err_out: - if (switchdev_trans_ph_prepare(trans)) - change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap); - - return err; -} - -static int rocker_port_ig_tbl(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - enum rocker_of_dpa_table_id goto_tbl; - u32 in_pport; - u32 in_pport_mask; - int err; - - /* Normal Ethernet Frames. Matches pkts from any local physical - * ports. Goto VLAN tbl. - */ - - in_pport = 0; - in_pport_mask = 0xffff0000; - goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; - - err = rocker_flow_tbl_ig_port(rocker_port, trans, flags, - in_pport, in_pport_mask, - goto_tbl); - if (err) - netdev_err(rocker_port->dev, - "Error (%d) ingress port table entry\n", err); - - return err; -} - -struct rocker_fdb_learn_work { - struct work_struct work; - struct rocker_port *rocker_port; - struct switchdev_trans *trans; - int flags; - u8 addr[ETH_ALEN]; - u16 vid; -}; - -static void rocker_port_fdb_learn_work(struct work_struct *work) -{ - const struct rocker_fdb_learn_work *lw = - container_of(work, struct rocker_fdb_learn_work, work); - bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE); - bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED); - struct switchdev_notifier_fdb_info info; - - info.addr = lw->addr; - info.vid = lw->vid; - - rtnl_lock(); - if (learned && removing) - call_switchdev_notifiers(SWITCHDEV_FDB_DEL, - lw->rocker_port->dev, &info.info); - else if (learned && !removing) - call_switchdev_notifiers(SWITCHDEV_FDB_ADD, - lw->rocker_port->dev, &info.info); - rtnl_unlock(); - - rocker_kfree(lw->trans, work); -} - -static int rocker_port_fdb_learn(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - const u8 *addr, __be16 vlan_id) -{ - struct rocker_fdb_learn_work *lw; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 out_pport = rocker_port->pport; - u32 tunnel_id = 0; - u32 group_id = ROCKER_GROUP_NONE; - bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC); - bool copy_to_cpu = false; - int err; - - if (rocker_port_is_bridged(rocker_port)) - group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); - - if (!(flags & ROCKER_OP_FLAG_REFRESH)) { - err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr, - NULL, vlan_id, tunnel_id, goto_tbl, - group_id, copy_to_cpu); - if (err) - return err; - } - - if (!syncing) - return 0; - - if (!rocker_port_is_bridged(rocker_port)) - return 0; - - lw = rocker_kzalloc(trans, flags, sizeof(*lw)); - if (!lw) - return -ENOMEM; - - INIT_WORK(&lw->work, rocker_port_fdb_learn_work); - - lw->rocker_port = rocker_port; - lw->trans = trans; - lw->flags = flags; - ether_addr_copy(lw->addr, addr); - lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id); - - if (switchdev_trans_ph_prepare(trans)) - rocker_kfree(trans, lw); - else - schedule_work(&lw->work); - - return 0; -} - -static struct rocker_fdb_tbl_entry * -rocker_fdb_tbl_find(const struct rocker *rocker, - const struct rocker_fdb_tbl_entry *match) -{ - struct rocker_fdb_tbl_entry *found; - - hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32) - if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) - return found; - - return NULL; -} - -static int rocker_port_fdb(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const unsigned char *addr, - __be16 vlan_id, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct 
rocker_fdb_tbl_entry *fdb; - struct rocker_fdb_tbl_entry *found; - bool removing = (flags & ROCKER_OP_FLAG_REMOVE); - unsigned long lock_flags; - - fdb = rocker_kzalloc(trans, flags, sizeof(*fdb)); - if (!fdb) - return -ENOMEM; - - fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED); - fdb->touched = jiffies; - fdb->key.rocker_port = rocker_port; - ether_addr_copy(fdb->key.addr, addr); - fdb->key.vlan_id = vlan_id; - fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - found = rocker_fdb_tbl_find(rocker, fdb); - - if (found) { - found->touched = jiffies; - if (removing) { - rocker_kfree(trans, fdb); - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - } else if (!removing) { - if (!switchdev_trans_ph_prepare(trans)) - hash_add(rocker->fdb_tbl, &fdb->entry, - fdb->key_crc32); - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - /* Check if adding and already exists, or removing and can't find */ - if (!found != !removing) { - rocker_kfree(trans, fdb); - if (!found && removing) - return 0; - /* Refreshing existing to update aging timers */ - flags |= ROCKER_OP_FLAG_REFRESH; - } - - return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id); -} - -static int rocker_port_fdb_flush(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - unsigned long lock_flags; - struct hlist_node *tmp; - int bkt; - int err = 0; - - if (rocker_port->stp_state == BR_STATE_LEARNING || - rocker_port->stp_state == BR_STATE_FORWARDING) - return 0; - - flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - if (!found->learned) - continue; - err = rocker_port_fdb_learn(rocker_port, trans, flags, - found->key.addr, - found->key.vlan_id); - if (err) - goto err_out; - if (!switchdev_trans_ph_prepare(trans)) - hash_del(&found->entry); - } - -err_out: - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static void rocker_fdb_cleanup(unsigned long data) -{ - struct rocker *rocker = (struct rocker *)data; - struct rocker_port *rocker_port; - struct rocker_fdb_tbl_entry *entry; - struct hlist_node *tmp; - unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; - unsigned long expires; - unsigned long lock_flags; - int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE | - ROCKER_OP_FLAG_LEARNED; - int bkt; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) { - if (!entry->learned) - continue; - rocker_port = entry->key.rocker_port; - expires = entry->touched + rocker_port->ageing_time; - if (time_before_eq(expires, jiffies)) { - rocker_port_fdb_learn(rocker_port, NULL, - flags, entry->key.addr, - entry->key.vlan_id); - hash_del(&entry->entry); - } else if (time_before(expires, next_timer)) { - next_timer = expires; - } - } - - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer)); -} - -static int rocker_port_router_mac(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - __be16 vlan_id) -{ - u32 in_pport_mask = 0xffffffff; - __be16 eth_type; - const u8 *dst_mac_mask = ff_mac; - __be16 vlan_id_mask = htons(0xffff); - 
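/*
 * Illustrative sketch, not part of the patch: rocker_fdb_cleanup() above ages
 * out learned FDB entries by comparing "touched + ageing_time" against
 * jiffies and remembers the earliest future expiry so the cleanup timer can
 * be re-armed exactly then. The assumed fragment below restates that scan
 * with plain integer timestamps instead of jiffies.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct toy_fdb_entry {
        bool learned;
        uint64_t touched;       /* timestamp of last use */
};

/* Marks expired entries and returns when the cleanup should next run. */
static uint64_t toy_fdb_age(struct toy_fdb_entry *tbl, size_t n,
                            uint64_t now, uint64_t ageing_time,
                            bool *expired)
{
        /* default re-arm delay (the driver starts from BR_MIN_AGEING_TIME) */
        uint64_t next_run = now + ageing_time;
        size_t i;

        for (i = 0; i < n; i++) {
                uint64_t expires = tbl[i].touched + ageing_time;

                expired[i] = false;
                if (!tbl[i].learned)            /* static entries never age out */
                        continue;
                if (expires <= now)
                        expired[i] = true;      /* remove from hardware and table */
                else if (expires < next_run)
                        next_run = expires;     /* re-arm for the earliest expiry */
        }
        return next_run;
}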
bool copy_to_cpu = false; - int err; - - if (ntohs(vlan_id) == 0) - vlan_id = rocker_port->internal_vlan_id; - - eth_type = htons(ETH_P_IP); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - if (err) - return err; - - eth_type = htons(ETH_P_IPV6); - err = rocker_flow_tbl_term_mac(rocker_port, trans, - rocker_port->pport, in_pport_mask, - eth_type, rocker_port->dev->dev_addr, - dst_mac_mask, vlan_id, vlan_id_mask, - copy_to_cpu, flags); - - return err; -} - -static int rocker_port_fwding(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - bool pop_vlan; - u32 out_pport; - __be16 vlan_id; - u16 vid; - int err; - - /* Port will be forwarding-enabled if its STP state is LEARNING - * or FORWARDING. Traffic from CPU can still egress, regardless of - * port STP state. Use L2 interface group on port VLANs as a way - * to toggle port forwarding: if forwarding is disabled, L2 - * interface group will not exist. - */ - - if (rocker_port->stp_state != BR_STATE_LEARNING && - rocker_port->stp_state != BR_STATE_FORWARDING) - flags |= ROCKER_OP_FLAG_REMOVE; - - out_pport = rocker_port->pport; - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan_id = htons(vid); - pop_vlan = rocker_vlan_id_is_internal(vlan_id); - err = rocker_group_l2_interface(rocker_port, trans, flags, - vlan_id, out_pport, pop_vlan); - if (err) { - netdev_err(rocker_port->dev, - "Error (%d) port VLAN l2 group for pport %d\n", - err, out_pport); - return err; - } - } - - return 0; -} - -static int rocker_port_stp_update(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags, - u8 state) -{ - bool want[ROCKER_CTRL_MAX] = { 0, }; - bool prev_ctrls[ROCKER_CTRL_MAX]; - u8 uninitialized_var(prev_state); - int err; - int i; - - if (switchdev_trans_ph_prepare(trans)) { - memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls)); - prev_state = rocker_port->stp_state; - } - - if (rocker_port->stp_state == state) - return 0; - - rocker_port->stp_state = state; - - switch (state) { - case BR_STATE_DISABLED: - /* port is completely disabled */ - break; - case BR_STATE_LISTENING: - case BR_STATE_BLOCKING: - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - break; - case BR_STATE_LEARNING: - case BR_STATE_FORWARDING: - if (!rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true; - want[ROCKER_CTRL_IPV4_MCAST] = true; - want[ROCKER_CTRL_IPV6_MCAST] = true; - if (rocker_port_is_bridged(rocker_port)) - want[ROCKER_CTRL_DFLT_BRIDGING] = true; - else if (rocker_port_is_ovsed(rocker_port)) - want[ROCKER_CTRL_DFLT_OVS] = true; - else - want[ROCKER_CTRL_LOCAL_ARP] = true; - break; - } - - for (i = 0; i < ROCKER_CTRL_MAX; i++) { - if (want[i] != rocker_port->ctrls[i]) { - int ctrl_flags = flags | - (want[i] ? 
0 : ROCKER_OP_FLAG_REMOVE); - err = rocker_port_ctrl(rocker_port, trans, ctrl_flags, - &rocker_ctrls[i]); - if (err) - goto err_out; - rocker_port->ctrls[i] = want[i]; - } - } - - err = rocker_port_fdb_flush(rocker_port, trans, flags); - if (err) - goto err_out; - - err = rocker_port_fwding(rocker_port, trans, flags); - -err_out: - if (switchdev_trans_ph_prepare(trans)) { - memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); - rocker_port->stp_state = prev_state; - } - - return err; -} - -static int rocker_port_fwd_enable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will enable port */ - return 0; - - /* port is not bridged, so simulate going to FORWARDING state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_FORWARDING); -} - -static int rocker_port_fwd_disable(struct rocker_port *rocker_port, - struct switchdev_trans *trans, int flags) -{ - if (rocker_port_is_bridged(rocker_port)) - /* bridge STP will disable port */ - return 0; - - /* port is not bridged, so simulate going to DISABLED state */ - return rocker_port_stp_update(rocker_port, trans, flags, - BR_STATE_DISABLED); -} - -static struct rocker_internal_vlan_tbl_entry * -rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex) -{ - struct rocker_internal_vlan_tbl_entry *found; - - hash_for_each_possible(rocker->internal_vlan_tbl, found, - entry, ifindex) { - if (found->ifindex == ifindex) - return found; - } - - return NULL; -} - -static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *entry; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - int i; - - entry = kzalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - return 0; - - entry->ifindex = ifindex; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (found) { - kfree(entry); - goto found; - } - - found = entry; - hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex); - - for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) { - if (test_and_set_bit(i, rocker->internal_vlan_bitmap)) - continue; - found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i); - goto found; - } - - netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n"); - -found: - found->ref_count++; - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); - - return found->vlan_id; -} - -static void -rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port, - int ifindex) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_internal_vlan_tbl_entry *found; - unsigned long lock_flags; - unsigned long bit; - - spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags); - - found = rocker_internal_vlan_tbl_find(rocker, ifindex); - if (!found) { - netdev_err(rocker_port->dev, - "ifindex (%d) not found in internal VLAN tbl\n", - ifindex); - goto not_found; - } - - if (--found->ref_count <= 0) { - bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE; - clear_bit(bit, rocker->internal_vlan_bitmap); - hash_del(&found->entry); - kfree(found); - } - -not_found: - spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags); -} - -static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, - struct switchdev_trans *trans, __be32 dst, - int dst_len, const struct fib_info *fi, - u32 
tb_id, int flags) -{ - const struct fib_nh *nh; - __be16 eth_type = htons(ETH_P_IP); - __be32 dst_mask = inet_make_mask(dst_len); - __be16 internal_vlan_id = rocker_port->internal_vlan_id; - u32 priority = fi->fib_priority; - enum rocker_of_dpa_table_id goto_tbl = - ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; - u32 group_id; - bool nh_on_port; - bool has_gw; - u32 index; - int err; - - /* XXX support ECMP */ - - nh = fi->fib_nh; - nh_on_port = (fi->fib_dev == rocker_port->dev); - has_gw = !!nh->nh_gw; - - if (has_gw && nh_on_port) { - err = rocker_port_ipv4_nh(rocker_port, trans, flags, - nh->nh_gw, &index); - if (err) - return err; - - group_id = ROCKER_GROUP_L3_UNICAST(index); - } else { - /* Send to CPU for processing */ - group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); - } - - err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst, - dst_mask, priority, goto_tbl, - group_id, flags); - if (err) - netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n", - err, &dst); - - return err; -} - -/***************** - * Net device ops - *****************/ - -static int rocker_port_open(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int err; - - err = rocker_port_dma_rings_init(rocker_port); - if (err) - return err; - - err = request_irq(rocker_msix_tx_vector(rocker_port), - rocker_tx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign tx irq\n"); - goto err_request_tx_irq; - } - - err = request_irq(rocker_msix_rx_vector(rocker_port), - rocker_rx_irq_handler, 0, - rocker_driver_name, rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot assign rx irq\n"); - goto err_request_rx_irq; - } - - err = rocker_world_port_open(rocker_port); - if (err) { - netdev_err(rocker_port->dev, "cannot open port in world\n"); - goto err_world_port_open; - } - - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - if (err) - goto err_fwd_enable; - - napi_enable(&rocker_port->napi_tx); - napi_enable(&rocker_port->napi_rx); - if (!dev->proto_down) - rocker_port_set_enable(rocker_port, true); - netif_start_queue(dev); - return 0; - -err_fwd_enable: -err_world_port_open: - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); -err_request_rx_irq: - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); -err_request_tx_irq: - rocker_port_dma_rings_fini(rocker_port); - return err; -} - -static int rocker_port_stop(struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - - netif_stop_queue(dev); - rocker_port_set_enable(rocker_port, false); - napi_disable(&rocker_port->napi_rx); - napi_disable(&rocker_port->napi_tx); - rocker_world_port_stop(rocker_port); - rocker_port_fwd_disable(rocker_port, NULL, - ROCKER_OP_FLAG_NOWAIT); - free_irq(rocker_msix_rx_vector(rocker_port), rocker_port); - free_irq(rocker_msix_tx_vector(rocker_port), rocker_port); - rocker_port_dma_rings_fini(rocker_port); - - return 0; -} - -static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port, - const struct rocker_desc_info *desc_info) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1]; - struct rocker_tlv *attr; - int rem; - - rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info); - if (!attrs[ROCKER_TLV_TX_FRAGS]) - return; - rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) { - const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1]; - 
dma_addr_t dma_handle; - size_t len; - - if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG) - continue; - rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX, - attr); - if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] || - !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) - continue; - dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]); - len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]); - pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE); - } -} - -static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port, - struct rocker_desc_info *desc_info, - char *buf, size_t buf_len) -{ - const struct rocker *rocker = rocker_port->rocker; - struct pci_dev *pdev = rocker->pdev; - dma_addr_t dma_handle; - struct rocker_tlv *frag; - - dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) { - if (net_ratelimit()) - netdev_err(rocker_port->dev, "failed to dma map tx frag\n"); - return -EIO; - } - frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG); - if (!frag) - goto unmap_frag; - if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR, - dma_handle)) - goto nest_cancel; - if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN, - buf_len)) - goto nest_cancel; - rocker_tlv_nest_end(desc_info, frag); - return 0; - -nest_cancel: - rocker_tlv_nest_cancel(desc_info, frag); -unmap_frag: - pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE); - return -EMSGSIZE; -} - -static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - struct rocker *rocker = rocker_port->rocker; - struct rocker_desc_info *desc_info; - struct rocker_tlv *frags; - int i; - int err; + struct rocker_port *rocker_port = netdev_priv(dev); + struct rocker *rocker = rocker_port->rocker; + struct rocker_desc_info *desc_info; + struct rocker_tlv *frags; + int i; + int err; desc_info = rocker_desc_head_get(&rocker_port->tx_ring); if (unlikely(!desc_info)) { @@ -4439,12 +1999,8 @@ static int rocker_port_change_proto_down(struct net_device *dev, static void rocker_port_neigh_destroy(struct neighbour *n) { struct rocker_port *rocker_port = netdev_priv(n->dev); - int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; int err; - rocker_port_ipv4_neigh(rocker_port, NULL, - flags, ip_addr, n->ha); err = rocker_world_port_neigh_destroy(rocker_port, n); if (err) netdev_warn(rocker_port->dev, "failed to handle neigh destroy (err %d)\n", @@ -4485,7 +2041,6 @@ static int rocker_port_attr_get(struct net_device *dev, memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - attr->u.brport_flags = rocker_port->brport_flags; err = rocker_world_port_attr_bridge_flags_get(rocker_port, &attr->u.brport_flags); break; @@ -4496,38 +2051,6 @@ static int rocker_port_attr_get(struct net_device *dev, return err; } -static int rocker_port_brport_flags_set(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - unsigned long brport_flags) -{ - unsigned long orig_flags; - int err = 0; - - orig_flags = rocker_port->brport_flags; - rocker_port->brport_flags = brport_flags; - if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING && - !switchdev_trans_ph_prepare(trans)) - err = rocker_port_set_learning(rocker_port, - !!(rocker_port->brport_flags & BR_LEARNING)); - - if (switchdev_trans_ph_prepare(trans)) - 
rocker_port->brport_flags = orig_flags; - - return err; -} - -static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u32 ageing_time) -{ - if (!switchdev_trans_ph_prepare(trans)) { - rocker_port->ageing_time = clock_t_to_jiffies(ageing_time); - mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies); - } - - return 0; -} - static int rocker_port_attr_set(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans) @@ -4537,28 +2060,16 @@ static int rocker_port_attr_set(struct net_device *dev, switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_STP_STATE: - err = rocker_port_stp_update(rocker_port, trans, 0, - attr->u.stp_state); - if (err) - break; err = rocker_world_port_attr_stp_state_set(rocker_port, attr->u.stp_state, trans); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: - err = rocker_port_brport_flags_set(rocker_port, trans, - attr->u.brport_flags); - if (err) - break; err = rocker_world_port_attr_bridge_flags_set(rocker_port, attr->u.brport_flags, trans); break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = rocker_port_bridge_ageing_time(rocker_port, trans, - attr->u.ageing_time); - if (err) - break; err = rocker_world_port_attr_bridge_ageing_time_set(rocker_port, attr->u.ageing_time, trans); @@ -4571,90 +2082,25 @@ static int rocker_port_attr_set(struct net_device *dev, return err; } -static int rocker_port_vlan_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - u16 vid, u16 flags) -{ - int err; - - /* XXX deal with flags for PVID and untagged */ - - err = rocker_port_vlan(rocker_port, trans, 0, vid); - if (err) - return err; - - err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid)); - if (err) - rocker_port_vlan(rocker_port, trans, - ROCKER_OP_FLAG_REMOVE, vid); - - return err; -} - -static int rocker_port_vlans_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_add(rocker_port, trans, - vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_add(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int flags = 0; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - static int rocker_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) { struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; int err = 0; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - if (err) - break; err = rocker_world_port_obj_vlan_add(rocker_port, SWITCHDEV_OBJ_PORT_VLAN(obj), trans); break; case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, trans, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, 0); - if (err) - break; err = rocker_world_port_obj_fib4_add(rocker_port, SWITCHDEV_OBJ_IPV4_FIB(obj), trans); break; case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_add(rocker_port, trans, - SWITCHDEV_OBJ_PORT_FDB(obj)); - if (err) - break; err = rocker_world_port_obj_fdb_add(rocker_port, 
SWITCHDEV_OBJ_PORT_FDB(obj), trans); @@ -4667,80 +2113,22 @@ static int rocker_port_obj_add(struct net_device *dev, return err; } -static int rocker_port_vlan_del(struct rocker_port *rocker_port, - u16 vid, u16 flags) -{ - int err; - - err = rocker_port_router_mac(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, htons(vid)); - if (err) - return err; - - return rocker_port_vlan(rocker_port, NULL, - ROCKER_OP_FLAG_REMOVE, vid); -} - -static int rocker_port_vlans_del(struct rocker_port *rocker_port, - const struct switchdev_obj_port_vlan *vlan) -{ - u16 vid; - int err; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - err = rocker_port_vlan_del(rocker_port, vid, vlan->flags); - if (err) - return err; - } - - return 0; -} - -static int rocker_port_fdb_del(struct rocker_port *rocker_port, - struct switchdev_trans *trans, - const struct switchdev_obj_port_fdb *fdb) -{ - __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL); - int flags = ROCKER_OP_FLAG_REMOVE; - - if (!rocker_port_is_bridged(rocker_port)) - return -EINVAL; - - return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags); -} - static int rocker_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj) { struct rocker_port *rocker_port = netdev_priv(dev); - const struct switchdev_obj_ipv4_fib *fib4; int err = 0; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlans_del(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj)); - if (err) - break; err = rocker_world_port_obj_vlan_del(rocker_port, SWITCHDEV_OBJ_PORT_VLAN(obj)); break; case SWITCHDEV_OBJ_ID_IPV4_FIB: - fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj); - err = rocker_port_fib_ipv4(rocker_port, NULL, - htonl(fib4->dst), fib4->dst_len, - &fib4->fi, fib4->tb_id, - ROCKER_OP_FLAG_REMOVE); - if (err) - break; err = rocker_world_port_obj_fib4_del(rocker_port, SWITCHDEV_OBJ_IPV4_FIB(obj)); break; case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_del(rocker_port, NULL, - SWITCHDEV_OBJ_PORT_FDB(obj)); - if (err) - break; err = rocker_world_port_obj_fdb_del(rocker_port, SWITCHDEV_OBJ_PORT_FDB(obj)); break; @@ -4752,57 +2140,6 @@ static int rocker_port_obj_del(struct net_device *dev, return err; } -static int rocker_port_fdb_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) -{ - struct rocker *rocker = rocker_port->rocker; - struct rocker_fdb_tbl_entry *found; - struct hlist_node *tmp; - unsigned long lock_flags; - int bkt; - int err = 0; - - spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags); - hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) { - if (found->key.rocker_port != rocker_port) - continue; - ether_addr_copy(fdb->addr, found->key.addr); - fdb->ndm_state = NUD_REACHABLE; - fdb->vid = rocker_port_vlan_to_vid(rocker_port, - found->key.vlan_id); - err = cb(&fdb->obj); - if (err) - break; - } - spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags); - - return err; -} - -static int rocker_port_vlan_dump(const struct rocker_port *rocker_port, - struct switchdev_obj_port_vlan *vlan, - switchdev_obj_dump_cb_t *cb) -{ - u16 vid; - int err = 0; - - for (vid = 1; vid < VLAN_N_VID; vid++) { - if (!test_bit(vid, rocker_port->vlan_bitmap)) - continue; - vlan->flags = 0; - if (rocker_vlan_id_is_internal(htons(vid))) - vlan->flags |= BRIDGE_VLAN_INFO_PVID; - vlan->vid_begin = vid; - vlan->vid_end = vid; - err = cb(&vlan->obj); - if (err) - break; - } - - return err; -} - static int rocker_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, 
switchdev_obj_dump_cb_t *cb) @@ -4812,19 +2149,11 @@ static int rocker_port_obj_dump(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_FDB: - err = rocker_port_fdb_dump(rocker_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); - if (err) - break; err = rocker_world_port_obj_fdb_dump(rocker_port, SWITCHDEV_OBJ_PORT_FDB(obj), cb); break; case SWITCHDEV_OBJ_ID_PORT_VLAN: - err = rocker_port_vlan_dump(rocker_port, - SWITCHDEV_OBJ_PORT_VLAN(obj), cb); - if (err) - break; err = rocker_world_port_obj_vlan_dump(rocker_port, SWITCHDEV_OBJ_PORT_VLAN(obj), cb); @@ -5162,7 +2491,6 @@ static void rocker_remove_ports(struct rocker *rocker) rocker_port = rocker->ports[i]; if (!rocker_port) continue; - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); rocker_world_port_fini(rocker_port); unregister_netdev(rocker_port->dev); rocker_world_port_post_fini(rocker_port); @@ -5191,7 +2519,6 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) const struct pci_dev *pdev = rocker->pdev; struct rocker_port *rocker_port; struct net_device *dev; - u16 untagged_vid = 0; int err; dev = alloc_etherdev(sizeof(struct rocker_port)); @@ -5202,8 +2529,6 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) rocker_port->rocker = rocker; rocker_port->port_number = port_number; rocker_port->pport = port_number + 1; - rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; - rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME; err = rocker_world_check_init(rocker_port); if (err) { @@ -5241,32 +2566,8 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number) goto err_world_port_init; } - switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false); - - rocker_port_set_learning(rocker_port, - !!(rocker_port->brport_flags & BR_LEARNING)); - - err = rocker_port_ig_tbl(rocker_port, NULL, 0); - if (err) { - netdev_err(rocker_port->dev, "install ig port table failed\n"); - goto err_port_ig_tbl; - } - - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex); - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) { - netdev_err(rocker_port->dev, "install untagged VLAN failed\n"); - goto err_untagged_vlan; - } - return 0; -err_untagged_vlan: - rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE); -err_port_ig_tbl: - rocker_world_port_fini(rocker_port); err_world_port_init: rocker->ports[port_number] = NULL; unregister_netdev(dev); @@ -5432,16 +2733,6 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) rocker->hw.id = rocker_read64(rocker, SWITCH_ID); - err = rocker_init_tbls(rocker); - if (err) { - dev_err(&pdev->dev, "cannot init rocker tables\n"); - goto err_init_tbls; - } - - setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup, - (unsigned long) rocker); - mod_timer(&rocker->fdb_cleanup_timer, jiffies); - err = rocker_probe_ports(rocker); if (err) { dev_err(&pdev->dev, "failed to probe ports\n"); @@ -5454,9 +2745,6 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; err_probe_ports: - del_timer_sync(&rocker->fdb_cleanup_timer); - rocker_free_tbls(rocker); -err_init_tbls: free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); err_request_event_irq: free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker); @@ -5482,8 +2770,6 @@ static void rocker_remove(struct pci_dev *pdev) { struct rocker *rocker = pci_get_drvdata(pdev); - del_timer_sync(&rocker->fdb_cleanup_timer); - 
rocker_free_tbls(rocker); rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET); rocker_remove_ports(rocker); free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker); @@ -5512,100 +2798,6 @@ static bool rocker_port_dev_check(const struct net_device *dev) return dev->netdev_ops == &rocker_port_netdev_ops; } -static int rocker_port_bridge_join(struct rocker_port *rocker_port, - struct net_device *bridge) -{ - u16 untagged_vid = 0; - int err; - - /* Port is joining bridge, so the internal VLAN for the - * port is going to change to the bridge internal VLAN. - * Let's remove untagged VLAN (vid=0) from port and - * re-add once internal VLAN has changed. - */ - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex); - - rocker_port->bridge_dev = bridge; - switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true); - - return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); -} - -static int rocker_port_bridge_leave(struct rocker_port *rocker_port) -{ - u16 untagged_vid = 0; - int err; - - err = rocker_port_vlan_del(rocker_port, untagged_vid, 0); - if (err) - return err; - - rocker_port_internal_vlan_id_put(rocker_port, - rocker_port->bridge_dev->ifindex); - rocker_port->internal_vlan_id = - rocker_port_internal_vlan_id_get(rocker_port, - rocker_port->dev->ifindex); - - switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev, - false); - rocker_port->bridge_dev = NULL; - - err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0); - if (err) - return err; - - if (rocker_port->dev->flags & IFF_UP) - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - -static int rocker_port_ovs_changed(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err; - - rocker_port->bridge_dev = master; - - err = rocker_port_fwd_disable(rocker_port, NULL, 0); - if (err) - return err; - err = rocker_port_fwd_enable(rocker_port, NULL, 0); - - return err; -} - -static int rocker_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) -{ - int err = 0; - - if (netif_is_bridge_master(master)) - err = rocker_port_bridge_join(rocker_port, master); - else if (netif_is_ovs_master(master)) - err = rocker_port_ovs_changed(rocker_port, master); - return err; -} - -static int rocker_port_master_unlinked(struct rocker_port *rocker_port) -{ - int err = 0; - - if (rocker_port_is_bridged(rocker_port)) - err = rocker_port_bridge_leave(rocker_port); - else if (rocker_port_is_ovsed(rocker_port)) - err = rocker_port_ovs_changed(rocker_port, NULL); - return err; -} - static int rocker_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -5629,23 +2821,13 @@ static int rocker_netdevice_event(struct notifier_block *unused, if (err) netdev_warn(dev, "failed to reflect master linked (err %d)\n", err); - err = rocker_port_master_linked(rocker_port, - info->upper_dev); - if (err) - netdev_warn(dev, "failed to reflect master linked (err %d)\n", - err); } else { err = rocker_world_port_master_unlinked(rocker_port, info->upper_dev); if (err) netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", err); - err = rocker_port_master_unlinked(rocker_port); - if (err) - netdev_warn(dev, "failed to reflect master unlinked (err %d)\n", - err); } - break; } out: return NOTIFY_DONE; @@ -5659,16 +2841,6 @@ static 
struct notifier_block rocker_netdevice_nb __read_mostly = { * Net event notifier event handler ************************************/ -static int rocker_neigh_update(struct net_device *dev, struct neighbour *n) -{ - struct rocker_port *rocker_port = netdev_priv(dev); - int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) | - ROCKER_OP_FLAG_NOWAIT; - __be32 ip_addr = *(__be32 *)n->primary_key; - - return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha); -} - static int rocker_netevent_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -5689,11 +2861,6 @@ static int rocker_netevent_event(struct notifier_block *unused, if (err) netdev_warn(dev, "failed to handle neigh update (err %d)\n", err); - err = rocker_neigh_update(dev, n); - if (err) - netdev_warn(dev, - "failed to handle neigh update (err %d)\n", - err); break; } diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 155dc534a228..099008a53b03 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -11,17 +11,2942 @@ */ #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "rocker.h" +#include "rocker_tlv.h" + +struct ofdpa_flow_tbl_key { + u32 priority; + enum rocker_of_dpa_table_id tbl_id; + union { + struct { + u32 in_pport; + u32 in_pport_mask; + enum rocker_of_dpa_table_id goto_tbl; + } ig_port; + struct { + u32 in_pport; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool untagged; + __be16 new_vlan_id; + } vlan; + struct { + u32 in_pport; + u32 in_pport_mask; + __be16 eth_type; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 vlan_id; + __be16 vlan_id_mask; + enum rocker_of_dpa_table_id goto_tbl; + bool copy_to_cpu; + } term_mac; + struct { + __be16 eth_type; + __be32 dst4; + __be32 dst4_mask; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + } ucast_routing; + struct { + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + int has_eth_dst; + int has_eth_dst_mask; + __be16 vlan_id; + u32 tunnel_id; + enum rocker_of_dpa_table_id goto_tbl; + u32 group_id; + bool copy_to_cpu; + } bridge; + struct { + u32 in_pport; + u32 in_pport_mask; + u8 eth_src[ETH_ALEN]; + u8 eth_src_mask[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + u8 eth_dst_mask[ETH_ALEN]; + __be16 eth_type; + __be16 vlan_id; + __be16 vlan_id_mask; + u8 ip_proto; + u8 ip_proto_mask; + u8 ip_tos; + u8 ip_tos_mask; + u32 group_id; + } acl; + }; +}; + +struct ofdpa_flow_tbl_entry { + struct hlist_node entry; + u32 cmd; + u64 cookie; + struct ofdpa_flow_tbl_key key; + size_t key_len; + u32 key_crc32; /* key */ +}; + +struct ofdpa_group_tbl_entry { + struct hlist_node entry; + u32 cmd; + u32 group_id; /* key */ + u16 group_count; + u32 *group_ids; + union { + struct { + u8 pop_vlan; + } l2_interface; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + u32 group_id; + } l2_rewrite; + struct { + u8 eth_src[ETH_ALEN]; + u8 eth_dst[ETH_ALEN]; + __be16 vlan_id; + bool ttl_check; + u32 group_id; + } l3_unicast; + }; +}; + +struct ofdpa_fdb_tbl_entry { + struct hlist_node entry; + u32 key_crc32; /* key */ + bool learned; + unsigned long touched; + struct ofdpa_fdb_tbl_key { + struct ofdpa_port *ofdpa_port; + u8 addr[ETH_ALEN]; + __be16 vlan_id; + } key; +}; + +struct ofdpa_internal_vlan_tbl_entry { + struct hlist_node entry; + int ifindex; /* key */ + u32 ref_count; + 
__be16 vlan_id; +}; + +struct ofdpa_neigh_tbl_entry { + struct hlist_node entry; + __be32 ip_addr; /* key */ + struct net_device *dev; + u32 ref_count; + u32 index; + u8 eth_dst[ETH_ALEN]; + bool ttl_check; +}; + +enum { + OFDPA_CTRL_LINK_LOCAL_MCAST, + OFDPA_CTRL_LOCAL_ARP, + OFDPA_CTRL_IPV4_MCAST, + OFDPA_CTRL_IPV6_MCAST, + OFDPA_CTRL_DFLT_BRIDGING, + OFDPA_CTRL_DFLT_OVS, + OFDPA_CTRL_MAX, +}; + +#define OFDPA_INTERNAL_VLAN_ID_BASE 0x0f00 +#define OFDPA_N_INTERNAL_VLANS 255 +#define OFDPA_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID) +#define OFDPA_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(OFDPA_N_INTERNAL_VLANS) +#define OFDPA_UNTAGGED_VID 0 struct ofdpa { + struct rocker *rocker; + DECLARE_HASHTABLE(flow_tbl, 16); + spinlock_t flow_tbl_lock; /* for flow tbl accesses */ + u64 flow_tbl_next_cookie; + DECLARE_HASHTABLE(group_tbl, 16); + spinlock_t group_tbl_lock; /* for group tbl accesses */ + struct timer_list fdb_cleanup_timer; + DECLARE_HASHTABLE(fdb_tbl, 16); + spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */ + unsigned long internal_vlan_bitmap[OFDPA_INTERNAL_VLAN_BITMAP_LEN]; + DECLARE_HASHTABLE(internal_vlan_tbl, 8); + spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */ + DECLARE_HASHTABLE(neigh_tbl, 16); + spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ + u32 neigh_tbl_next_index; }; struct ofdpa_port { + struct ofdpa *ofdpa; + struct rocker_port *rocker_port; + struct net_device *dev; + u32 pport; + struct net_device *bridge_dev; + __be16 internal_vlan_id; + int stp_state; + u32 brport_flags; + unsigned long ageing_time; + bool ctrls[OFDPA_CTRL_MAX]; + unsigned long vlan_bitmap[OFDPA_VLAN_BITMAP_LEN]; }; +static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; +static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; +static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 }; +static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; +static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; +static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; +static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; + +/* Rocker priority levels for flow table entries. Higher + * priority match takes precedence over lower priority match. 
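+ * For example, in the ACL table a link-local control-frame match installed
+ * at OFDPA_PRIORITY_ACL_CTRL (3) takes precedence over a normal match at
+ * OFDPA_PRIORITY_ACL_NORMAL (2), which in turn beats the multicast default
+ * at OFDPA_PRIORITY_ACL_DFLT (1); see ofdpa_flow_tbl_acl() below.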
+ */ + +enum { + OFDPA_PRIORITY_UNKNOWN = 0, + OFDPA_PRIORITY_IG_PORT = 1, + OFDPA_PRIORITY_VLAN = 1, + OFDPA_PRIORITY_TERM_MAC_UCAST = 0, + OFDPA_PRIORITY_TERM_MAC_MCAST = 1, + OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1, + OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2, + OFDPA_PRIORITY_BRIDGING_VLAN = 3, + OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1, + OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2, + OFDPA_PRIORITY_BRIDGING_TENANT = 3, + OFDPA_PRIORITY_ACL_CTRL = 3, + OFDPA_PRIORITY_ACL_NORMAL = 2, + OFDPA_PRIORITY_ACL_DFLT = 1, +}; + +static bool ofdpa_vlan_id_is_internal(__be16 vlan_id) +{ + u16 start = OFDPA_INTERNAL_VLAN_ID_BASE; + u16 end = 0xffe; + u16 _vlan_id = ntohs(vlan_id); + + return (_vlan_id >= start && _vlan_id <= end); +} + +static __be16 ofdpa_port_vid_to_vlan(const struct ofdpa_port *ofdpa_port, + u16 vid, bool *pop_vlan) +{ + __be16 vlan_id; + + if (pop_vlan) + *pop_vlan = false; + vlan_id = htons(vid); + if (!vlan_id) { + vlan_id = ofdpa_port->internal_vlan_id; + if (pop_vlan) + *pop_vlan = true; + } + + return vlan_id; +} + +static u16 ofdpa_port_vlan_to_vid(const struct ofdpa_port *ofdpa_port, + __be16 vlan_id) +{ + if (ofdpa_vlan_id_is_internal(vlan_id)) + return 0; + + return ntohs(vlan_id); +} + +static bool ofdpa_port_is_slave(const struct ofdpa_port *ofdpa_port, + const char *kind) +{ + return ofdpa_port->bridge_dev && + !strcmp(ofdpa_port->bridge_dev->rtnl_link_ops->kind, kind); +} + +static bool ofdpa_port_is_bridged(const struct ofdpa_port *ofdpa_port) +{ + return ofdpa_port_is_slave(ofdpa_port, "bridge"); +} + +static bool ofdpa_port_is_ovsed(const struct ofdpa_port *ofdpa_port) +{ + return ofdpa_port_is_slave(ofdpa_port, "openvswitch"); +} + +#define OFDPA_OP_FLAG_REMOVE BIT(0) +#define OFDPA_OP_FLAG_NOWAIT BIT(1) +#define OFDPA_OP_FLAG_LEARNED BIT(2) +#define OFDPA_OP_FLAG_REFRESH BIT(3) + +static bool ofdpa_flags_nowait(int flags) +{ + return flags & OFDPA_OP_FLAG_NOWAIT; +} + +static void *__ofdpa_mem_alloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + struct switchdev_trans_item *elem = NULL; + gfp_t gfp_flags = (flags & OFDPA_OP_FLAG_NOWAIT) ? + GFP_ATOMIC : GFP_KERNEL; + + /* If in transaction prepare phase, allocate the memory + * and enqueue it on a transaction. If in transaction + * commit phase, dequeue the memory from the transaction + * rather than re-allocating the memory. The idea is the + * driver code paths for prepare and commit are identical + * so the memory allocated in the prepare phase is the + * memory used in the commit phase. + */ + + if (!trans) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + } else if (switchdev_trans_ph_prepare(trans)) { + elem = kzalloc(size + sizeof(*elem), gfp_flags); + if (!elem) + return NULL; + switchdev_trans_item_enqueue(trans, elem, kfree, elem); + } else { + elem = switchdev_trans_item_dequeue(trans); + } + + return elem ? elem + 1 : NULL; +} + +static void *ofdpa_kzalloc(struct switchdev_trans *trans, int flags, + size_t size) +{ + return __ofdpa_mem_alloc(trans, flags, size); +} + +static void *ofdpa_kcalloc(struct switchdev_trans *trans, int flags, + size_t n, size_t size) +{ + return __ofdpa_mem_alloc(trans, flags, n * size); +} + +static void ofdpa_kfree(struct switchdev_trans *trans, const void *mem) +{ + struct switchdev_trans_item *elem; + + /* Frees are ignored if in transaction prepare phase. The + * memory remains on the per-port list until freed in the + * commit phase. 
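+ * (See __ofdpa_mem_alloc() above: a prepare-phase allocation is enqueued
+ * on the transaction with kfree as its destructor and is dequeued again
+ * during commit, so the real kfree() only happens on the commit-phase
+ * call here, or presumably via the queued destructor if commit never runs.)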
+ */ + + if (switchdev_trans_ph_prepare(trans)) + return; + + elem = (struct switchdev_trans_item *) mem - 1; + kfree(elem); +} + +/************************************************************* + * Flow, group, FDB, internal VLAN and neigh command prepares + *************************************************************/ + +static int +ofdpa_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.ig_port.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.ig_port.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ig_port.goto_tbl)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.vlan.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.vlan.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.vlan.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.vlan.goto_tbl)) + return -EMSGSIZE; + if (entry->key.vlan.untagged && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID, + entry->key.vlan.new_vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.term_mac.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.term_mac.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.term_mac.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.term_mac.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.term_mac.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.term_mac.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.term_mac.vlan_id_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.term_mac.goto_tbl)) + return -EMSGSIZE; + if (entry->key.term_mac.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.term_mac.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.ucast_routing.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP, + entry->key.ucast_routing.dst4)) + return -EMSGSIZE; + if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK, + entry->key.ucast_routing.dst4_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.ucast_routing.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, 
ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.ucast_routing.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (entry->key.bridge.has_eth_dst && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.bridge.eth_dst)) + return -EMSGSIZE; + if (entry->key.bridge.has_eth_dst_mask && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.bridge.eth_dst_mask)) + return -EMSGSIZE; + if (entry->key.bridge.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.bridge.vlan_id)) + return -EMSGSIZE; + if (entry->key.bridge.tunnel_id && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID, + entry->key.bridge.tunnel_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID, + entry->key.bridge.goto_tbl)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.bridge.group_id)) + return -EMSGSIZE; + if (entry->key.bridge.copy_to_cpu && + rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION, + entry->key.bridge.copy_to_cpu)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info, + const struct ofdpa_flow_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT, + entry->key.acl.in_pport)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK, + entry->key.acl.in_pport_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->key.acl.eth_src)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_src_mask)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->key.acl.eth_dst)) + return -EMSGSIZE; + if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK, + ETH_ALEN, entry->key.acl.eth_dst_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE, + entry->key.acl.eth_type)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->key.acl.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK, + entry->key.acl.vlan_id_mask)) + return -EMSGSIZE; + + switch (ntohs(entry->key.acl.eth_type)) { + case ETH_P_IP: + case ETH_P_IPV6: + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO, + entry->key.acl.ip_proto)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_PROTO_MASK, + entry->key.acl.ip_proto_mask)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP, + entry->key.acl.ip_tos & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_DSCP_MASK, + entry->key.acl.ip_tos_mask & 0x3f)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN, + (entry->key.acl.ip_tos & 0xc0) >> 6)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, + ROCKER_TLV_OF_DPA_IP_ECN_MASK, + (entry->key.acl.ip_tos_mask & 0xc0) >> 6)) + return -EMSGSIZE; + break; + } + + if (entry->key.acl.group_id != ROCKER_GROUP_NONE && + rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->key.acl.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int ofdpa_cmd_flow_tbl_add(const struct rocker_port *rocker_port, + struct 
rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID, + entry->key.tbl_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY, + entry->key.priority)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0)) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + + switch (entry->key.tbl_id) { + case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT: + err = ofdpa_cmd_flow_tbl_add_ig_port(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_VLAN: + err = ofdpa_cmd_flow_tbl_add_vlan(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC: + err = ofdpa_cmd_flow_tbl_add_term_mac(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING: + err = ofdpa_cmd_flow_tbl_add_ucast_routing(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_BRIDGING: + err = ofdpa_cmd_flow_tbl_add_bridge(desc_info, entry); + break; + case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY: + err = ofdpa_cmd_flow_tbl_add_acl(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int ofdpa_cmd_flow_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_flow_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE, + entry->cookie)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info, + struct ofdpa_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT, + ROCKER_GROUP_PORT_GET(entry->group_id))) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN, + entry->l2_interface.pop_vlan)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l2_rewrite.group_id)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l2_rewrite.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l2_rewrite.eth_dst)) + return -EMSGSIZE; + if (entry->l2_rewrite.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l2_rewrite.vlan_id)) + return -EMSGSIZE; + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + int i; + struct rocker_tlv *group_ids; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT, + entry->group_count)) + 
return -EMSGSIZE; + + group_ids = rocker_tlv_nest_start(desc_info, + ROCKER_TLV_OF_DPA_GROUP_IDS); + if (!group_ids) + return -EMSGSIZE; + + for (i = 0; i < entry->group_count; i++) + /* Note TLV array is 1-based */ + if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i])) + return -EMSGSIZE; + + rocker_tlv_nest_end(desc_info, group_ids); + + return 0; +} + +static int +ofdpa_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info, + const struct ofdpa_group_tbl_entry *entry) +{ + if (!is_zero_ether_addr(entry->l3_unicast.eth_src) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC, + ETH_ALEN, entry->l3_unicast.eth_src)) + return -EMSGSIZE; + if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) && + rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC, + ETH_ALEN, entry->l3_unicast.eth_dst)) + return -EMSGSIZE; + if (entry->l3_unicast.vlan_id && + rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID, + entry->l3_unicast.vlan_id)) + return -EMSGSIZE; + if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK, + entry->l3_unicast.ttl_check)) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER, + entry->l3_unicast.group_id)) + return -EMSGSIZE; + + return 0; +} + +static int ofdpa_cmd_group_tbl_add(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + struct ofdpa_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + int err = 0; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE: + err = ofdpa_cmd_group_tbl_add_l2_interface(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE: + err = ofdpa_cmd_group_tbl_add_l2_rewrite(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + err = ofdpa_cmd_group_tbl_add_group_ids(desc_info, entry); + break; + case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST: + err = ofdpa_cmd_group_tbl_add_l3_unicast(desc_info, entry); + break; + default: + err = -ENOTSUPP; + break; + } + + if (err) + return err; + + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +static int ofdpa_cmd_group_tbl_del(const struct rocker_port *rocker_port, + struct rocker_desc_info *desc_info, + void *priv) +{ + const struct ofdpa_group_tbl_entry *entry = priv; + struct rocker_tlv *cmd_info; + + if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd)) + return -EMSGSIZE; + cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO); + if (!cmd_info) + return -EMSGSIZE; + if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID, + entry->group_id)) + return -EMSGSIZE; + rocker_tlv_nest_end(desc_info, cmd_info); + + return 0; +} + +/*************************************************** + * Flow, group, FDB, internal VLAN and neigh tables + ***************************************************/ + +static struct ofdpa_flow_tbl_entry * +ofdpa_flow_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? 
match->key_len : sizeof(found->key); + + hash_for_each_possible(ofdpa->flow_tbl, found, + entry, match->key_crc32) { + if (memcmp(&found->key, &match->key, key_len) == 0) + return found; + } + + return NULL; +} + +static int ofdpa_flow_tbl_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); + + found = ofdpa_flow_tbl_find(ofdpa, match); + + if (found) { + match->cookie = found->cookie; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + ofdpa_kfree(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD; + } else { + found = match; + found->cookie = ofdpa->flow_tbl_next_cookie++; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->flow_tbl, &found->entry, found->key_crc32); + + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); + + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_flow_tbl_add, + found, NULL, NULL); + return 0; +} + +static int ofdpa_flow_tbl_del(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_flow_tbl_entry *found; + size_t key_len = match->key_len ? match->key_len : sizeof(found->key); + unsigned long lock_flags; + int err = 0; + + match->key_crc32 = crc32(~0, &match->key, key_len); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, lock_flags); + + found = ofdpa_flow_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL; + } + + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, lock_flags); + + ofdpa_kfree(trans, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_flow_tbl_del, + found, NULL, NULL); + ofdpa_kfree(trans, found); + } + + return err; +} + +static int ofdpa_flow_tbl_do(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_flow_tbl_entry *entry) +{ + if (flags & OFDPA_OP_FLAG_REMOVE) + return ofdpa_flow_tbl_del(ofdpa_port, trans, flags, entry); + else + return ofdpa_flow_tbl_add(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_ig_port(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + enum rocker_of_dpa_table_id goto_tbl) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = OFDPA_PRIORITY_IG_PORT; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT; + entry->key.ig_port.in_pport = in_pport; + entry->key.ig_port.in_pport_mask = in_pport_mask; + entry->key.ig_port.goto_tbl = goto_tbl; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, __be16 vlan_id, + __be16 vlan_id_mask, + enum rocker_of_dpa_table_id goto_tbl, + bool untagged, 
__be16 new_vlan_id) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.priority = OFDPA_PRIORITY_VLAN; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN; + entry->key.vlan.in_pport = in_pport; + entry->key.vlan.vlan_id = vlan_id; + entry->key.vlan.vlan_id_mask = vlan_id_mask; + entry->key.vlan.goto_tbl = goto_tbl; + + entry->key.vlan.untagged = untagged; + entry->key.vlan.new_vlan_id = new_vlan_id; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_term_mac(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + u32 in_pport, u32 in_pport_mask, + __be16 eth_type, const u8 *eth_dst, + const u8 *eth_dst_mask, __be16 vlan_id, + __be16 vlan_id_mask, bool copy_to_cpu, + int flags) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + if (is_multicast_ether_addr(eth_dst)) { + entry->key.priority = OFDPA_PRIORITY_TERM_MAC_MCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING; + } else { + entry->key.priority = OFDPA_PRIORITY_TERM_MAC_UCAST; + entry->key.term_mac.goto_tbl = + ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + } + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + entry->key.term_mac.in_pport = in_pport; + entry->key.term_mac.in_pport_mask = in_pport_mask; + entry->key.term_mac.eth_type = eth_type; + ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst); + ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask); + entry->key.term_mac.vlan_id = vlan_id; + entry->key.term_mac.vlan_id_mask = vlan_id_mask; + entry->key.term_mac.copy_to_cpu = copy_to_cpu; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_bridge(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 vlan_id, u32 tunnel_id, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, bool copy_to_cpu) +{ + struct ofdpa_flow_tbl_entry *entry; + u32 priority; + bool vlan_bridging = !!vlan_id; + bool dflt = !eth_dst || (eth_dst && eth_dst_mask); + bool wild = false; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING; + + if (eth_dst) { + entry->key.bridge.has_eth_dst = 1; + ether_addr_copy(entry->key.bridge.eth_dst, eth_dst); + } + if (eth_dst_mask) { + entry->key.bridge.has_eth_dst_mask = 1; + ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask); + if (!ether_addr_equal(eth_dst_mask, ff_mac)) + wild = true; + } + + priority = OFDPA_PRIORITY_UNKNOWN; + if (vlan_bridging && dflt && wild) + priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_WILD; + else if (vlan_bridging && dflt && !wild) + priority = OFDPA_PRIORITY_BRIDGING_VLAN_DFLT_EXACT; + else if (vlan_bridging && !dflt) + priority = OFDPA_PRIORITY_BRIDGING_VLAN; + else if (!vlan_bridging && dflt && wild) + priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_WILD; + else if (!vlan_bridging && dflt && !wild) + priority = OFDPA_PRIORITY_BRIDGING_TENANT_DFLT_EXACT; + else if (!vlan_bridging && !dflt) + priority = OFDPA_PRIORITY_BRIDGING_TENANT; + + entry->key.priority = priority; + entry->key.bridge.vlan_id = vlan_id; + entry->key.bridge.tunnel_id = tunnel_id; + entry->key.bridge.goto_tbl = goto_tbl; + entry->key.bridge.group_id = group_id; + entry->key.bridge.copy_to_cpu = copy_to_cpu; + + return 
ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_ucast4_routing(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + __be16 eth_type, __be32 dst, + __be32 dst_mask, u32 priority, + enum rocker_of_dpa_table_id goto_tbl, + u32 group_id, int flags) +{ + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING; + entry->key.priority = priority; + entry->key.ucast_routing.eth_type = eth_type; + entry->key.ucast_routing.dst4 = dst; + entry->key.ucast_routing.dst4_mask = dst_mask; + entry->key.ucast_routing.goto_tbl = goto_tbl; + entry->key.ucast_routing.group_id = group_id; + entry->key_len = offsetof(struct ofdpa_flow_tbl_key, + ucast_routing.group_id); + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_flow_tbl_acl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 in_pport, u32 in_pport_mask, + const u8 *eth_src, const u8 *eth_src_mask, + const u8 *eth_dst, const u8 *eth_dst_mask, + __be16 eth_type, __be16 vlan_id, + __be16 vlan_id_mask, u8 ip_proto, + u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask, + u32 group_id) +{ + u32 priority; + struct ofdpa_flow_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + priority = OFDPA_PRIORITY_ACL_NORMAL; + if (eth_dst && eth_dst_mask) { + if (ether_addr_equal(eth_dst_mask, mcast_mac)) + priority = OFDPA_PRIORITY_ACL_DFLT; + else if (is_link_local_ether_addr(eth_dst)) + priority = OFDPA_PRIORITY_ACL_CTRL; + } + + entry->key.priority = priority; + entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + entry->key.acl.in_pport = in_pport; + entry->key.acl.in_pport_mask = in_pport_mask; + + if (eth_src) + ether_addr_copy(entry->key.acl.eth_src, eth_src); + if (eth_src_mask) + ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask); + if (eth_dst) + ether_addr_copy(entry->key.acl.eth_dst, eth_dst); + if (eth_dst_mask) + ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask); + + entry->key.acl.eth_type = eth_type; + entry->key.acl.vlan_id = vlan_id; + entry->key.acl.vlan_id_mask = vlan_id_mask; + entry->key.acl.ip_proto = ip_proto; + entry->key.acl.ip_proto_mask = ip_proto_mask; + entry->key.acl.ip_tos = ip_tos; + entry->key.acl.ip_tos_mask = ip_tos_mask; + entry->key.acl.group_id = group_id; + + return ofdpa_flow_tbl_do(ofdpa_port, trans, flags, entry); +} + +static struct ofdpa_group_tbl_entry * +ofdpa_group_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa_group_tbl_entry *found; + + hash_for_each_possible(ofdpa->group_tbl, found, + entry, match->group_id) { + if (found->group_id == match->group_id) + return found; + } + + return NULL; +} + +static void ofdpa_group_tbl_entry_free(struct switchdev_trans *trans, + struct ofdpa_group_tbl_entry *entry) +{ + switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) { + case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD: + case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST: + ofdpa_kfree(trans, entry->group_ids); + break; + default: + break; + } + ofdpa_kfree(trans, entry); +} + +static int ofdpa_group_tbl_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_group_tbl_entry *found; + unsigned long lock_flags; + + spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags); + + 
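+ /* As with ofdpa_flow_tbl_add(), look the group up by group_id while
+ * holding the lock: an existing entry is replaced and re-issued to the
+ * hardware as a GROUP_MOD command, otherwise the new entry is sent as
+ * a GROUP_ADD.
+ */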
found = ofdpa_group_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + ofdpa_group_tbl_entry_free(trans, found); + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD; + } else { + found = match; + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD; + } + + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->group_tbl, &found->entry, found->group_id); + + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags); + + if (!switchdev_trans_ph_prepare(trans)) + return rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_group_tbl_add, + found, NULL, NULL); + return 0; +} + +static int ofdpa_group_tbl_del(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *match) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_group_tbl_entry *found; + unsigned long lock_flags; + int err = 0; + + spin_lock_irqsave(&ofdpa->group_tbl_lock, lock_flags); + + found = ofdpa_group_tbl_find(ofdpa, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL; + } + + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, lock_flags); + + ofdpa_group_tbl_entry_free(trans, match); + + if (found) { + if (!switchdev_trans_ph_prepare(trans)) + err = rocker_cmd_exec(ofdpa_port->rocker_port, + ofdpa_flags_nowait(flags), + ofdpa_cmd_group_tbl_del, + found, NULL, NULL); + ofdpa_group_tbl_entry_free(trans, found); + } + + return err; +} + +static int ofdpa_group_tbl_do(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + struct ofdpa_group_tbl_entry *entry) +{ + if (flags & OFDPA_OP_FLAG_REMOVE) + return ofdpa_group_tbl_del(ofdpa_port, trans, flags, entry); + else + return ofdpa_group_tbl_add(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_interface(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u32 out_pport, + int pop_vlan) +{ + struct ofdpa_group_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + entry->l2_interface.pop_vlan = pop_vlan; + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_fan_out(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + struct ofdpa_group_tbl_entry *entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = group_id; + entry->group_count = group_count; + + entry->group_ids = ofdpa_kcalloc(trans, flags, + group_count, sizeof(u32)); + if (!entry->group_ids) { + ofdpa_kfree(trans, entry); + return -ENOMEM; + } + memcpy(entry->group_ids, group_ids, group_count * sizeof(u32)); + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static int ofdpa_group_l2_flood(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, u8 group_count, + const u32 *group_ids, u32 group_id) +{ + return ofdpa_group_l2_fan_out(ofdpa_port, trans, flags, + group_count, group_ids, + group_id); +} + +static int ofdpa_group_l3_unicast(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + u32 index, const u8 *src_mac, const u8 *dst_mac, + __be16 vlan_id, bool ttl_check, u32 pport) +{ + struct ofdpa_group_tbl_entry 
*entry; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + entry->group_id = ROCKER_GROUP_L3_UNICAST(index); + if (src_mac) + ether_addr_copy(entry->l3_unicast.eth_src, src_mac); + if (dst_mac) + ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac); + entry->l3_unicast.vlan_id = vlan_id; + entry->l3_unicast.ttl_check = ttl_check; + entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport); + + return ofdpa_group_tbl_do(ofdpa_port, trans, flags, entry); +} + +static struct ofdpa_neigh_tbl_entry * +ofdpa_neigh_tbl_find(const struct ofdpa *ofdpa, __be32 ip_addr) +{ + struct ofdpa_neigh_tbl_entry *found; + + hash_for_each_possible(ofdpa->neigh_tbl, found, + entry, be32_to_cpu(ip_addr)) + if (found->ip_addr == ip_addr) + return found; + + return NULL; +} + +static void ofdpa_neigh_add(struct ofdpa *ofdpa, + struct switchdev_trans *trans, + struct ofdpa_neigh_tbl_entry *entry) +{ + if (!switchdev_trans_ph_commit(trans)) + entry->index = ofdpa->neigh_tbl_next_index++; + if (switchdev_trans_ph_prepare(trans)) + return; + entry->ref_count++; + hash_add(ofdpa->neigh_tbl, &entry->entry, + be32_to_cpu(entry->ip_addr)); +} + +static void ofdpa_neigh_del(struct switchdev_trans *trans, + struct ofdpa_neigh_tbl_entry *entry) +{ + if (switchdev_trans_ph_prepare(trans)) + return; + if (--entry->ref_count == 0) { + hash_del(&entry->entry); + ofdpa_kfree(trans, entry); + } +} + +static void ofdpa_neigh_update(struct ofdpa_neigh_tbl_entry *entry, + struct switchdev_trans *trans, + const u8 *eth_dst, bool ttl_check) +{ + if (eth_dst) { + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = ttl_check; + } else if (!switchdev_trans_ph_prepare(trans)) { + entry->ref_count++; + } +} + +static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, __be32 ip_addr, const u8 *eth_dst) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_neigh_tbl_entry *entry; + struct ofdpa_neigh_tbl_entry *found; + unsigned long lock_flags; + __be16 eth_type = htons(ETH_P_IP); + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + u32 priority = 0; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + bool updating; + bool removing; + int err = 0; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); + + found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = ofdpa_port->dev; + ether_addr_copy(entry->eth_dst, eth_dst); + entry->ttl_check = true; + ofdpa_neigh_add(ofdpa, trans, entry); + } else if (removing) { + memcpy(entry, found, sizeof(*entry)); + ofdpa_neigh_del(trans, found); + } else if (updating) { + ofdpa_neigh_update(found, trans, eth_dst, true); + memcpy(entry, found, sizeof(*entry)); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); + + if (err) + goto err_out; + + /* For each active neighbor, we have an L3 unicast group and + * a /32 route to the neighbor, which uses the L3 unicast + * group. The L3 unicast group can also be referred to by + * other routes' nexthops. 
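+ * For example, a neighbour with index N is programmed as group
+ * ROCKER_GROUP_L3_UNICAST(N), and the /32 ucast_routing entry added
+ * below carries that same group_id as its nexthop.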
+ */ + + err = ofdpa_group_l3_unicast(ofdpa_port, trans, flags, + entry->index, + ofdpa_port->dev->dev_addr, + entry->eth_dst, + ofdpa_port->internal_vlan_id, + entry->ttl_check, + ofdpa_port->pport); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) L3 unicast group index %d\n", + err, entry->index); + goto err_out; + } + + if (adding || removing) { + group_id = ROCKER_GROUP_L3_UNICAST(entry->index); + err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, + eth_type, ip_addr, + inet_make_mask(32), + priority, goto_tbl, + group_id, flags); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) /32 unicast route %pI4 group 0x%08x\n", + err, &entry->ip_addr, group_id); + } + +err_out: + if (!adding) + ofdpa_kfree(trans, entry); + + return err; +} + +static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + __be32 ip_addr) +{ + struct net_device *dev = ofdpa_port->dev; + struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr); + int err = 0; + + if (!n) { + n = neigh_create(&arp_tbl, &ip_addr, dev); + if (IS_ERR(n)) + return IS_ERR(n); + } + + /* If the neigh is already resolved, then go ahead and + * install the entry, otherwise start the ARP process to + * resolve the neigh. + */ + + if (n->nud_state & NUD_VALID) + err = ofdpa_port_ipv4_neigh(ofdpa_port, trans, 0, + ip_addr, n->ha); + else + neigh_event_send(n, NULL); + + neigh_release(n); + return err; +} + +static int ofdpa_port_ipv4_nh(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be32 ip_addr, u32 *index) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_neigh_tbl_entry *entry; + struct ofdpa_neigh_tbl_entry *found; + unsigned long lock_flags; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + bool updating; + bool removing; + bool resolved = true; + int err = 0; + + entry = ofdpa_kzalloc(trans, flags, sizeof(*entry)); + if (!entry) + return -ENOMEM; + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, lock_flags); + + found = ofdpa_neigh_tbl_find(ofdpa, ip_addr); + if (found) + *index = found->index; + + updating = found && adding; + removing = found && !adding; + adding = !found && adding; + + if (adding) { + entry->ip_addr = ip_addr; + entry->dev = ofdpa_port->dev; + ofdpa_neigh_add(ofdpa, trans, entry); + *index = entry->index; + resolved = false; + } else if (removing) { + ofdpa_neigh_del(trans, found); + } else if (updating) { + ofdpa_neigh_update(found, trans, NULL, false); + resolved = !is_zero_ether_addr(found->eth_dst); + } else { + err = -ENOENT; + } + + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, lock_flags); + + if (!adding) + ofdpa_kfree(trans, entry); + + if (err) + return err; + + /* Resolved means neigh ip_addr is resolved to neigh mac. */ + + if (!resolved) + err = ofdpa_port_ipv4_resolve(ofdpa_port, trans, ip_addr); + + return err; +} + +static struct ofdpa_port *ofdpa_port_get(const struct ofdpa *ofdpa, + int port_index) +{ + struct rocker_port *rocker_port; + + rocker_port = ofdpa->rocker->ports[port_index]; + return rocker_port ? 
rocker_port->wpriv : NULL; +} + +static int ofdpa_port_vlan_flood_group(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, __be16 vlan_id) +{ + struct ofdpa_port *p; + const struct ofdpa *ofdpa = ofdpa_port->ofdpa; + unsigned int port_count = ofdpa->rocker->port_count; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 *group_ids; + u8 group_count = 0; + int err = 0; + int i; + + group_ids = ofdpa_kcalloc(trans, flags, port_count, sizeof(u32)); + if (!group_ids) + return -ENOMEM; + + /* Adjust the flood group for this VLAN. The flood group + * references an L2 interface group for each port in this + * VLAN. + */ + + for (i = 0; i < port_count; i++) { + p = ofdpa_port_get(ofdpa, i); + if (!p) + continue; + if (!ofdpa_port_is_bridged(p)) + continue; + if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) { + group_ids[group_count++] = + ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport); + } + } + + /* If there are no bridged ports in this VLAN, we're done */ + if (group_count == 0) + goto no_ports_in_vlan; + + err = ofdpa_group_l2_flood(ofdpa_port, trans, flags, vlan_id, + group_count, group_ids, group_id); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); + +no_ports_in_vlan: + ofdpa_kfree(trans, group_ids); + return err; +} + +static int ofdpa_port_vlan_l2_groups(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id, bool pop_vlan) +{ + const struct ofdpa *ofdpa = ofdpa_port->ofdpa; + unsigned int port_count = ofdpa->rocker->port_count; + struct ofdpa_port *p; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + u32 out_pport; + int ref = 0; + int err; + int i; + + /* An L2 interface group for this port in this VLAN, but + * only when port STP state is LEARNING|FORWARDING. + */ + + if (ofdpa_port->stp_state == BR_STATE_LEARNING || + ofdpa_port->stp_state == BR_STATE_FORWARDING) { + out_pport = ofdpa_port->pport; + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + /* An L2 interface group for this VLAN to CPU port. + * Add when first port joins this VLAN and destroy when + * last port leaves this VLAN. 
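+ * (The ref count computed below enforces this: the group is only
+ * programmed when adding brings the count to 1 or removing brings it
+ * to 0, using out_pport 0 for the CPU port.)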
+ */ + + for (i = 0; i < port_count; i++) { + p = ofdpa_port_get(ofdpa, i); + if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap)) + ref++; + } + + if ((!adding || ref != 1) && (adding || ref != 0)) + return 0; + + out_pport = 0; + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for CPU port\n", err); + return err; + } + + return 0; +} + +static struct ofdpa_ctrl { + const u8 *eth_dst; + const u8 *eth_dst_mask; + __be16 eth_type; + bool acl; + bool bridge; + bool term; + bool copy_to_cpu; +} ofdpa_ctrls[] = { + [OFDPA_CTRL_LINK_LOCAL_MCAST] = { + /* pass link local multicast pkts up to CPU for filtering */ + .eth_dst = ll_mac, + .eth_dst_mask = ll_mask, + .acl = true, + }, + [OFDPA_CTRL_LOCAL_ARP] = { + /* pass local ARP pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .eth_type = htons(ETH_P_ARP), + .acl = true, + }, + [OFDPA_CTRL_IPV4_MCAST] = { + /* pass IPv4 mcast pkts up to CPU, RFC 1112 */ + .eth_dst = ipv4_mcast, + .eth_dst_mask = ipv4_mask, + .eth_type = htons(ETH_P_IP), + .term = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_IPV6_MCAST] = { + /* pass IPv6 mcast pkts up to CPU, RFC 2464 */ + .eth_dst = ipv6_mcast, + .eth_dst_mask = ipv6_mask, + .eth_type = htons(ETH_P_IPV6), + .term = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_DFLT_BRIDGING] = { + /* flood any pkts on vlan */ + .bridge = true, + .copy_to_cpu = true, + }, + [OFDPA_CTRL_DFLT_OVS] = { + /* pass all pkts up to CPU */ + .eth_dst = zero_mac, + .eth_dst_mask = zero_mac, + .acl = true, + }, +}; + +static int ofdpa_port_ctrl_vlan_acl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport = ofdpa_port->pport; + u32 in_pport_mask = 0xffffffff; + u32 out_pport = 0; + const u8 *eth_src = NULL; + const u8 *eth_src_mask = NULL; + __be16 vlan_id_mask = htons(0xffff); + u8 ip_proto = 0; + u8 ip_proto_mask = 0; + u8 ip_tos = 0; + u8 ip_tos_mask = 0; + u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + int err; + + err = ofdpa_flow_tbl_acl(ofdpa_port, trans, flags, + in_pport, in_pport_mask, + eth_src, eth_src_mask, + ctrl->eth_dst, ctrl->eth_dst_mask, + ctrl->eth_type, + vlan_id, vlan_id_mask, + ip_proto, ip_proto_mask, + ip_tos, ip_tos_mask, + group_id); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl ACL\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan_bridge(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, + const struct ofdpa_ctrl *ctrl, + __be16 vlan_id) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0); + u32 tunnel_id = 0; + int err; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return 0; + + err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, + ctrl->eth_dst, ctrl->eth_dst_mask, + vlan_id, tunnel_id, + goto_tbl, group_id, ctrl->copy_to_cpu); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl FLOOD\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan_term(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 vlan_id_mask = htons(0xffff); + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = ofdpa_port->internal_vlan_id; + + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + 
ctrl->eth_type, ctrl->eth_dst, + ctrl->eth_dst_mask, vlan_id, + vlan_id_mask, ctrl->copy_to_cpu, + flags); + + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ctrl term\n", err); + + return err; +} + +static int ofdpa_port_ctrl_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl, __be16 vlan_id) +{ + if (ctrl->acl) + return ofdpa_port_ctrl_vlan_acl(ofdpa_port, trans, flags, + ctrl, vlan_id); + if (ctrl->bridge) + return ofdpa_port_ctrl_vlan_bridge(ofdpa_port, trans, flags, + ctrl, vlan_id); + + if (ctrl->term) + return ofdpa_port_ctrl_vlan_term(ofdpa_port, trans, flags, + ctrl, vlan_id); + + return -EOPNOTSUPP; +} + +static int ofdpa_port_ctrl_vlan_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + int err = 0; + int i; + + for (i = 0; i < OFDPA_CTRL_MAX; i++) { + if (ofdpa_port->ctrls[i]) { + err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags, + &ofdpa_ctrls[i], vlan_id); + if (err) + return err; + } + } + + return err; +} + +static int ofdpa_port_ctrl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const struct ofdpa_ctrl *ctrl) +{ + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + err = ofdpa_port_ctrl_vlan(ofdpa_port, trans, flags, + ctrl, htons(vid)); + if (err) + break; + } + + return err; +} + +static int ofdpa_port_vlan(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, u16 vid) +{ + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC; + u32 in_pport = ofdpa_port->pport; + __be16 vlan_id = htons(vid); + __be16 vlan_id_mask = htons(0xffff); + __be16 internal_vlan_id; + bool untagged; + bool adding = !(flags & OFDPA_OP_FLAG_REMOVE); + int err; + + internal_vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, vid, &untagged); + + if (adding && + test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap)) + return 0; /* already added */ + else if (!adding && + !test_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap)) + return 0; /* already removed */ + + change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); + + if (adding) { + err = ofdpa_port_ctrl_vlan_add(ofdpa_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port ctrl vlan add\n", err); + goto err_out; + } + } + + err = ofdpa_port_vlan_l2_groups(ofdpa_port, trans, flags, + internal_vlan_id, untagged); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 groups\n", err); + goto err_out; + } + + err = ofdpa_port_vlan_flood_group(ofdpa_port, trans, flags, + internal_vlan_id); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 flood group\n", err); + goto err_out; + } + + err = ofdpa_flow_tbl_vlan(ofdpa_port, trans, flags, + in_pport, vlan_id, vlan_id_mask, + goto_tbl, untagged, internal_vlan_id); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN table\n", err); + +err_out: + if (switchdev_trans_ph_prepare(trans)) + change_bit(ntohs(internal_vlan_id), ofdpa_port->vlan_bitmap); + + return err; +} + +static int ofdpa_port_ig_tbl(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + enum rocker_of_dpa_table_id goto_tbl; + u32 in_pport; + u32 in_pport_mask; + int err; + + /* Normal Ethernet Frames. Matches pkts from any local physical + * ports. Goto VLAN tbl. 
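+ * (With in_pport 0 and mask 0xffff0000 the low 16 bits are wildcarded,
+ * so a single ingress-port entry matches every physical pport and sends
+ * it on to the VLAN table.)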
+ */ + + in_pport = 0; + in_pport_mask = 0xffff0000; + goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN; + + err = ofdpa_flow_tbl_ig_port(ofdpa_port, trans, flags, + in_pport, in_pport_mask, + goto_tbl); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) ingress port table entry\n", err); + + return err; +} + +struct ofdpa_fdb_learn_work { + struct work_struct work; + struct ofdpa_port *ofdpa_port; + struct switchdev_trans *trans; + int flags; + u8 addr[ETH_ALEN]; + u16 vid; +}; + +static void ofdpa_port_fdb_learn_work(struct work_struct *work) +{ + const struct ofdpa_fdb_learn_work *lw = + container_of(work, struct ofdpa_fdb_learn_work, work); + bool removing = (lw->flags & OFDPA_OP_FLAG_REMOVE); + bool learned = (lw->flags & OFDPA_OP_FLAG_LEARNED); + struct switchdev_notifier_fdb_info info; + + info.addr = lw->addr; + info.vid = lw->vid; + + rtnl_lock(); + if (learned && removing) + call_switchdev_notifiers(SWITCHDEV_FDB_DEL, + lw->ofdpa_port->dev, &info.info); + else if (learned && !removing) + call_switchdev_notifiers(SWITCHDEV_FDB_ADD, + lw->ofdpa_port->dev, &info.info); + rtnl_unlock(); + + ofdpa_kfree(lw->trans, work); +} + +static int ofdpa_port_fdb_learn(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + const u8 *addr, __be16 vlan_id) +{ + struct ofdpa_fdb_learn_work *lw; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 out_pport = ofdpa_port->pport; + u32 tunnel_id = 0; + u32 group_id = ROCKER_GROUP_NONE; + bool syncing = !!(ofdpa_port->brport_flags & BR_LEARNING_SYNC); + bool copy_to_cpu = false; + int err; + + if (ofdpa_port_is_bridged(ofdpa_port)) + group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport); + + if (!(flags & OFDPA_OP_FLAG_REFRESH)) { + err = ofdpa_flow_tbl_bridge(ofdpa_port, trans, flags, addr, + NULL, vlan_id, tunnel_id, goto_tbl, + group_id, copy_to_cpu); + if (err) + return err; + } + + if (!syncing) + return 0; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return 0; + + lw = ofdpa_kzalloc(trans, flags, sizeof(*lw)); + if (!lw) + return -ENOMEM; + + INIT_WORK(&lw->work, ofdpa_port_fdb_learn_work); + + lw->ofdpa_port = ofdpa_port; + lw->trans = trans; + lw->flags = flags; + ether_addr_copy(lw->addr, addr); + lw->vid = ofdpa_port_vlan_to_vid(ofdpa_port, vlan_id); + + if (switchdev_trans_ph_prepare(trans)) + ofdpa_kfree(trans, lw); + else + schedule_work(&lw->work); + + return 0; +} + +static struct ofdpa_fdb_tbl_entry * +ofdpa_fdb_tbl_find(const struct ofdpa *ofdpa, + const struct ofdpa_fdb_tbl_entry *match) +{ + struct ofdpa_fdb_tbl_entry *found; + + hash_for_each_possible(ofdpa->fdb_tbl, found, entry, match->key_crc32) + if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0) + return found; + + return NULL; +} + +static int ofdpa_port_fdb(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + const unsigned char *addr, + __be16 vlan_id, int flags) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *fdb; + struct ofdpa_fdb_tbl_entry *found; + bool removing = (flags & OFDPA_OP_FLAG_REMOVE); + unsigned long lock_flags; + + fdb = ofdpa_kzalloc(trans, flags, sizeof(*fdb)); + if (!fdb) + return -ENOMEM; + + fdb->learned = (flags & OFDPA_OP_FLAG_LEARNED); + fdb->touched = jiffies; + fdb->key.ofdpa_port = ofdpa_port; + ether_addr_copy(fdb->key.addr, addr); + fdb->key.vlan_id = vlan_id; + fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key)); + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + found = ofdpa_fdb_tbl_find(ofdpa, fdb); + + if (found) { + 
found->touched = jiffies; + if (removing) { + ofdpa_kfree(trans, fdb); + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + } else if (!removing) { + if (!switchdev_trans_ph_prepare(trans)) + hash_add(ofdpa->fdb_tbl, &fdb->entry, + fdb->key_crc32); + } + + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + /* Check if adding and already exists, or removing and can't find */ + if (!found != !removing) { + ofdpa_kfree(trans, fdb); + if (!found && removing) + return 0; + /* Refreshing existing to update aging timers */ + flags |= OFDPA_OP_FLAG_REFRESH; + } + + return ofdpa_port_fdb_learn(ofdpa_port, trans, flags, addr, vlan_id); +} + +static int ofdpa_port_fdb_flush(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *found; + unsigned long lock_flags; + struct hlist_node *tmp; + int bkt; + int err = 0; + + if (ofdpa_port->stp_state == BR_STATE_LEARNING || + ofdpa_port->stp_state == BR_STATE_FORWARDING) + return 0; + + flags |= OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.ofdpa_port != ofdpa_port) + continue; + if (!found->learned) + continue; + err = ofdpa_port_fdb_learn(ofdpa_port, trans, flags, + found->key.addr, + found->key.vlan_id); + if (err) + goto err_out; + if (!switchdev_trans_ph_prepare(trans)) + hash_del(&found->entry); + } + +err_out: + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + return err; +} + +static void ofdpa_fdb_cleanup(unsigned long data) +{ + struct ofdpa *ofdpa = (struct ofdpa *)data; + struct ofdpa_port *ofdpa_port; + struct ofdpa_fdb_tbl_entry *entry; + struct hlist_node *tmp; + unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; + unsigned long expires; + unsigned long lock_flags; + int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE | + OFDPA_OP_FLAG_LEARNED; + int bkt; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, entry, entry) { + if (!entry->learned) + continue; + ofdpa_port = entry->key.ofdpa_port; + expires = entry->touched + ofdpa_port->ageing_time; + if (time_before_eq(expires, jiffies)) { + ofdpa_port_fdb_learn(ofdpa_port, NULL, + flags, entry->key.addr, + entry->key.vlan_id); + hash_del(&entry->entry); + } else if (time_before(expires, next_timer)) { + next_timer = expires; + } + } + + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + mod_timer(&ofdpa->fdb_cleanup_timer, round_jiffies_up(next_timer)); +} + +static int ofdpa_port_router_mac(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, int flags, + __be16 vlan_id) +{ + u32 in_pport_mask = 0xffffffff; + __be16 eth_type; + const u8 *dst_mac_mask = ff_mac; + __be16 vlan_id_mask = htons(0xffff); + bool copy_to_cpu = false; + int err; + + if (ntohs(vlan_id) == 0) + vlan_id = ofdpa_port->internal_vlan_id; + + eth_type = htons(ETH_P_IP); + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + eth_type, ofdpa_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + if (err) + return err; + + eth_type = htons(ETH_P_IPV6); + err = ofdpa_flow_tbl_term_mac(ofdpa_port, trans, + ofdpa_port->pport, in_pport_mask, + eth_type, ofdpa_port->dev->dev_addr, + dst_mac_mask, vlan_id, vlan_id_mask, + copy_to_cpu, flags); + + return err; +} + +static int ofdpa_port_fwding(struct ofdpa_port 
*ofdpa_port, + struct switchdev_trans *trans, int flags) +{ + bool pop_vlan; + u32 out_pport; + __be16 vlan_id; + u16 vid; + int err; + + /* Port will be forwarding-enabled if its STP state is LEARNING + * or FORWARDING. Traffic from CPU can still egress, regardless of + * port STP state. Use L2 interface group on port VLANs as a way + * to toggle port forwarding: if forwarding is disabled, L2 + * interface group will not exist. + */ + + if (ofdpa_port->stp_state != BR_STATE_LEARNING && + ofdpa_port->stp_state != BR_STATE_FORWARDING) + flags |= OFDPA_OP_FLAG_REMOVE; + + out_pport = ofdpa_port->pport; + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + vlan_id = htons(vid); + pop_vlan = ofdpa_vlan_id_is_internal(vlan_id); + err = ofdpa_group_l2_interface(ofdpa_port, trans, flags, + vlan_id, out_pport, pop_vlan); + if (err) { + netdev_err(ofdpa_port->dev, "Error (%d) port VLAN l2 group for pport %d\n", + err, out_pport); + return err; + } + } + + return 0; +} + +static int ofdpa_port_stp_update(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + int flags, u8 state) +{ + bool want[OFDPA_CTRL_MAX] = { 0, }; + bool prev_ctrls[OFDPA_CTRL_MAX]; + u8 uninitialized_var(prev_state); + int err; + int i; + + if (switchdev_trans_ph_prepare(trans)) { + memcpy(prev_ctrls, ofdpa_port->ctrls, sizeof(prev_ctrls)); + prev_state = ofdpa_port->stp_state; + } + + if (ofdpa_port->stp_state == state) + return 0; + + ofdpa_port->stp_state = state; + + switch (state) { + case BR_STATE_DISABLED: + /* port is completely disabled */ + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; + break; + case BR_STATE_LEARNING: + case BR_STATE_FORWARDING: + if (!ofdpa_port_is_ovsed(ofdpa_port)) + want[OFDPA_CTRL_LINK_LOCAL_MCAST] = true; + want[OFDPA_CTRL_IPV4_MCAST] = true; + want[OFDPA_CTRL_IPV6_MCAST] = true; + if (ofdpa_port_is_bridged(ofdpa_port)) + want[OFDPA_CTRL_DFLT_BRIDGING] = true; + else if (ofdpa_port_is_ovsed(ofdpa_port)) + want[OFDPA_CTRL_DFLT_OVS] = true; + else + want[OFDPA_CTRL_LOCAL_ARP] = true; + break; + } + + for (i = 0; i < OFDPA_CTRL_MAX; i++) { + if (want[i] != ofdpa_port->ctrls[i]) { + int ctrl_flags = flags | + (want[i] ? 
0 : OFDPA_OP_FLAG_REMOVE); + err = ofdpa_port_ctrl(ofdpa_port, trans, ctrl_flags, + &ofdpa_ctrls[i]); + if (err) + goto err_out; + ofdpa_port->ctrls[i] = want[i]; + } + } + + err = ofdpa_port_fdb_flush(ofdpa_port, trans, flags); + if (err) + goto err_out; + + err = ofdpa_port_fwding(ofdpa_port, trans, flags); + +err_out: + if (switchdev_trans_ph_prepare(trans)) { + memcpy(ofdpa_port->ctrls, prev_ctrls, sizeof(prev_ctrls)); + ofdpa_port->stp_state = prev_state; + } + + return err; +} + +static int ofdpa_port_fwd_enable(struct ofdpa_port *ofdpa_port, int flags) +{ + if (ofdpa_port_is_bridged(ofdpa_port)) + /* bridge STP will enable port */ + return 0; + + /* port is not bridged, so simulate going to FORWARDING state */ + return ofdpa_port_stp_update(ofdpa_port, NULL, flags, + BR_STATE_FORWARDING); +} + +static int ofdpa_port_fwd_disable(struct ofdpa_port *ofdpa_port, int flags) +{ + if (ofdpa_port_is_bridged(ofdpa_port)) + /* bridge STP will disable port */ + return 0; + + /* port is not bridged, so simulate going to DISABLED state */ + return ofdpa_port_stp_update(ofdpa_port, NULL, flags, + BR_STATE_DISABLED); +} + +static int ofdpa_port_vlan_add(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, + u16 vid, u16 flags) +{ + int err; + + /* XXX deal with flags for PVID and untagged */ + + err = ofdpa_port_vlan(ofdpa_port, trans, 0, vid); + if (err) + return err; + + err = ofdpa_port_router_mac(ofdpa_port, trans, 0, htons(vid)); + if (err) + ofdpa_port_vlan(ofdpa_port, trans, + OFDPA_OP_FLAG_REMOVE, vid); + + return err; +} + +static int ofdpa_port_vlan_del(struct ofdpa_port *ofdpa_port, + u16 vid, u16 flags) +{ + int err; + + err = ofdpa_port_router_mac(ofdpa_port, NULL, + OFDPA_OP_FLAG_REMOVE, htons(vid)); + if (err) + return err; + + return ofdpa_port_vlan(ofdpa_port, NULL, + OFDPA_OP_FLAG_REMOVE, vid); +} + +static struct ofdpa_internal_vlan_tbl_entry * +ofdpa_internal_vlan_tbl_find(const struct ofdpa *ofdpa, int ifindex) +{ + struct ofdpa_internal_vlan_tbl_entry *found; + + hash_for_each_possible(ofdpa->internal_vlan_tbl, found, + entry, ifindex) { + if (found->ifindex == ifindex) + return found; + } + + return NULL; +} + +static __be16 ofdpa_port_internal_vlan_id_get(struct ofdpa_port *ofdpa_port, + int ifindex) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_internal_vlan_tbl_entry *entry; + struct ofdpa_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return 0; + + entry->ifindex = ifindex; + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); + if (found) { + kfree(entry); + goto found; + } + + found = entry; + hash_add(ofdpa->internal_vlan_tbl, &found->entry, found->ifindex); + + for (i = 0; i < OFDPA_N_INTERNAL_VLANS; i++) { + if (test_and_set_bit(i, ofdpa->internal_vlan_bitmap)) + continue; + found->vlan_id = htons(OFDPA_INTERNAL_VLAN_ID_BASE + i); + goto found; + } + + netdev_err(ofdpa_port->dev, "Out of internal VLAN IDs\n"); + +found: + found->ref_count++; + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + return found->vlan_id; +} + +static int ofdpa_port_fib_ipv4(struct ofdpa_port *ofdpa_port, + struct switchdev_trans *trans, __be32 dst, + int dst_len, const struct fib_info *fi, + u32 tb_id, int flags) +{ + const struct fib_nh *nh; + __be16 eth_type = htons(ETH_P_IP); + __be32 dst_mask = inet_make_mask(dst_len); + __be16 internal_vlan_id = ofdpa_port->internal_vlan_id; + u32 
priority = fi->fib_priority; + enum rocker_of_dpa_table_id goto_tbl = + ROCKER_OF_DPA_TABLE_ID_ACL_POLICY; + u32 group_id; + bool nh_on_port; + bool has_gw; + u32 index; + int err; + + /* XXX support ECMP */ + + nh = fi->fib_nh; + nh_on_port = (fi->fib_dev == ofdpa_port->dev); + has_gw = !!nh->nh_gw; + + if (has_gw && nh_on_port) { + err = ofdpa_port_ipv4_nh(ofdpa_port, trans, flags, + nh->nh_gw, &index); + if (err) + return err; + + group_id = ROCKER_GROUP_L3_UNICAST(index); + } else { + /* Send to CPU for processing */ + group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0); + } + + err = ofdpa_flow_tbl_ucast4_routing(ofdpa_port, trans, eth_type, dst, + dst_mask, priority, goto_tbl, + group_id, flags); + if (err) + netdev_err(ofdpa_port->dev, "Error (%d) IPv4 route %pI4\n", + err, &dst); + + return err; +} + +static void +ofdpa_port_internal_vlan_id_put(const struct ofdpa_port *ofdpa_port, + int ifindex) +{ + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_internal_vlan_tbl_entry *found; + unsigned long lock_flags; + unsigned long bit; + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, lock_flags); + + found = ofdpa_internal_vlan_tbl_find(ofdpa, ifindex); + if (!found) { + netdev_err(ofdpa_port->dev, + "ifindex (%d) not found in internal VLAN tbl\n", + ifindex); + goto not_found; + } + + if (--found->ref_count <= 0) { + bit = ntohs(found->vlan_id) - OFDPA_INTERNAL_VLAN_ID_BASE; + clear_bit(bit, ofdpa->internal_vlan_bitmap); + hash_del(&found->entry); + kfree(found); + } + +not_found: + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, lock_flags); +} + +/********************************** + * Rocker world ops implementation + **********************************/ + +static int ofdpa_init(struct rocker *rocker) +{ + struct ofdpa *ofdpa = rocker->wpriv; + + ofdpa->rocker = rocker; + + hash_init(ofdpa->flow_tbl); + spin_lock_init(&ofdpa->flow_tbl_lock); + + hash_init(ofdpa->group_tbl); + spin_lock_init(&ofdpa->group_tbl_lock); + + hash_init(ofdpa->fdb_tbl); + spin_lock_init(&ofdpa->fdb_tbl_lock); + + hash_init(ofdpa->internal_vlan_tbl); + spin_lock_init(&ofdpa->internal_vlan_tbl_lock); + + hash_init(ofdpa->neigh_tbl); + spin_lock_init(&ofdpa->neigh_tbl_lock); + + setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, + (unsigned long) ofdpa); + mod_timer(&ofdpa->fdb_cleanup_timer, jiffies); + + return 0; +} + +static void ofdpa_fini(struct rocker *rocker) +{ + struct ofdpa *ofdpa = rocker->wpriv; + + unsigned long flags; + struct ofdpa_flow_tbl_entry *flow_entry; + struct ofdpa_group_tbl_entry *group_entry; + struct ofdpa_fdb_tbl_entry *fdb_entry; + struct ofdpa_internal_vlan_tbl_entry *internal_vlan_entry; + struct ofdpa_neigh_tbl_entry *neigh_entry; + struct hlist_node *tmp; + int bkt; + + del_timer_sync(&ofdpa->fdb_cleanup_timer); + + spin_lock_irqsave(&ofdpa->flow_tbl_lock, flags); + hash_for_each_safe(ofdpa->flow_tbl, bkt, tmp, flow_entry, entry) + hash_del(&flow_entry->entry); + spin_unlock_irqrestore(&ofdpa->flow_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->group_tbl_lock, flags); + hash_for_each_safe(ofdpa->group_tbl, bkt, tmp, group_entry, entry) + hash_del(&group_entry->entry); + spin_unlock_irqrestore(&ofdpa->group_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, flags); + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, fdb_entry, entry) + hash_del(&fdb_entry->entry); + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->internal_vlan_tbl_lock, flags); + hash_for_each_safe(ofdpa->internal_vlan_tbl, bkt, + tmp, 
internal_vlan_entry, entry) + hash_del(&internal_vlan_entry->entry); + spin_unlock_irqrestore(&ofdpa->internal_vlan_tbl_lock, flags); + + spin_lock_irqsave(&ofdpa->neigh_tbl_lock, flags); + hash_for_each_safe(ofdpa->neigh_tbl, bkt, tmp, neigh_entry, entry) + hash_del(&neigh_entry->entry); + spin_unlock_irqrestore(&ofdpa->neigh_tbl_lock, flags); +} + +static int ofdpa_port_pre_init(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port->ofdpa = rocker_port->rocker->wpriv; + ofdpa_port->rocker_port = rocker_port; + ofdpa_port->dev = rocker_port->dev; + ofdpa_port->pport = rocker_port->pport; + ofdpa_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC; + ofdpa_port->ageing_time = BR_DEFAULT_AGEING_TIME; + return 0; +} + +static int ofdpa_port_init(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err; + + switchdev_port_fwd_mark_set(ofdpa_port->dev, NULL, false); + rocker_port_set_learning(rocker_port, + !!(ofdpa_port->brport_flags & BR_LEARNING)); + + err = ofdpa_port_ig_tbl(ofdpa_port, NULL, 0); + if (err) { + netdev_err(ofdpa_port->dev, "install ig port table failed\n"); + return err; + } + + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, + ofdpa_port->dev->ifindex); + + err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); + if (err) { + netdev_err(ofdpa_port->dev, "install untagged VLAN failed\n"); + goto err_untagged_vlan; + } + return 0; + +err_untagged_vlan: + ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE); + return err; +} + +static void ofdpa_port_fini(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port_ig_tbl(ofdpa_port, NULL, OFDPA_OP_FLAG_REMOVE); +} + +static int ofdpa_port_open(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fwd_enable(ofdpa_port, 0); +} + +static void ofdpa_port_stop(struct rocker_port *rocker_port) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + ofdpa_port_fwd_disable(ofdpa_port, OFDPA_OP_FLAG_NOWAIT); +} + +static int ofdpa_port_attr_stp_state_set(struct rocker_port *rocker_port, + u8 state, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_stp_update(ofdpa_port, trans, 0, state); +} + +static int ofdpa_port_attr_bridge_flags_set(struct rocker_port *rocker_port, + unsigned long brport_flags, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + unsigned long orig_flags; + int err = 0; + + orig_flags = ofdpa_port->brport_flags; + ofdpa_port->brport_flags = brport_flags; + if ((orig_flags ^ ofdpa_port->brport_flags) & BR_LEARNING && + !switchdev_trans_ph_prepare(trans)) + err = rocker_port_set_learning(ofdpa_port->rocker_port, + !!(ofdpa_port->brport_flags & BR_LEARNING)); + + if (switchdev_trans_ph_prepare(trans)) + ofdpa_port->brport_flags = orig_flags; + + return err; +} + +static int +ofdpa_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, + unsigned long *p_brport_flags) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + *p_brport_flags = ofdpa_port->brport_flags; + return 0; +} + +static int +ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, + u32 ageing_time, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + if (!switchdev_trans_ph_prepare(trans)) { + ofdpa_port->ageing_time = 
clock_t_to_jiffies(ageing_time); + mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); + } + + return 0; +} + +static int ofdpa_port_obj_vlan_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ofdpa_port_vlan_add(ofdpa_port, trans, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int ofdpa_port_obj_vlan_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + err = ofdpa_port_vlan_del(ofdpa_port, vid, vlan->flags); + if (err) + return err; + } + + return 0; +} + +static int ofdpa_port_obj_vlan_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_vlan *vlan, + switchdev_obj_dump_cb_t *cb) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + u16 vid; + int err = 0; + + for (vid = 1; vid < VLAN_N_VID; vid++) { + if (!test_bit(vid, ofdpa_port->vlan_bitmap)) + continue; + vlan->flags = 0; + if (ofdpa_vlan_id_is_internal(htons(vid))) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + vlan->vid_begin = vlan->vid_end = vid; + err = cb(&vlan->obj); + if (err) + break; + } + + return err; +} + +static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fib_ipv4(ofdpa_port, trans, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, 0); +} + +static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port, + const struct switchdev_obj_ipv4_fib *fib4) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + + return ofdpa_port_fib_ipv4(ofdpa_port, NULL, + htonl(fib4->dst), fib4->dst_len, + &fib4->fi, fib4->tb_id, + OFDPA_OP_FLAG_REMOVE); +} + +static int ofdpa_port_obj_fdb_add(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb, + struct switchdev_trans *trans) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return -EINVAL; + + return ofdpa_port_fdb(ofdpa_port, trans, fdb->addr, vlan_id, 0); +} + +static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, + const struct switchdev_obj_port_fdb *fdb) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + __be16 vlan_id = ofdpa_port_vid_to_vlan(ofdpa_port, fdb->vid, NULL); + int flags = OFDPA_OP_FLAG_REMOVE; + + if (!ofdpa_port_is_bridged(ofdpa_port)) + return -EINVAL; + + return ofdpa_port_fdb(ofdpa_port, NULL, fdb->addr, vlan_id, flags); +} + +static int ofdpa_port_obj_fdb_dump(const struct rocker_port *rocker_port, + struct switchdev_obj_port_fdb *fdb, + switchdev_obj_dump_cb_t *cb) +{ + const struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + struct ofdpa *ofdpa = ofdpa_port->ofdpa; + struct ofdpa_fdb_tbl_entry *found; + struct hlist_node *tmp; + unsigned long lock_flags; + int bkt; + int err = 0; + + spin_lock_irqsave(&ofdpa->fdb_tbl_lock, lock_flags); + hash_for_each_safe(ofdpa->fdb_tbl, bkt, tmp, found, entry) { + if (found->key.ofdpa_port != ofdpa_port) + continue; + ether_addr_copy(fdb->addr, found->key.addr); + fdb->ndm_state = NUD_REACHABLE; + fdb->vid = 
ofdpa_port_vlan_to_vid(ofdpa_port, + found->key.vlan_id); + err = cb(&fdb->obj); + if (err) + break; + } + spin_unlock_irqrestore(&ofdpa->fdb_tbl_lock, lock_flags); + + return err; +} + +static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, + struct net_device *bridge) +{ + int err; + + /* Port is joining bridge, so the internal VLAN for the + * port is going to change to the bridge internal VLAN. + * Let's remove untagged VLAN (vid=0) from port and + * re-add once internal VLAN has changed. + */ + + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + ofdpa_port_internal_vlan_id_put(ofdpa_port, + ofdpa_port->dev->ifindex); + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, bridge->ifindex); + + ofdpa_port->bridge_dev = bridge; + switchdev_port_fwd_mark_set(ofdpa_port->dev, bridge, true); + + return ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); +} + +static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) +{ + int err; + + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + ofdpa_port_internal_vlan_id_put(ofdpa_port, + ofdpa_port->bridge_dev->ifindex); + ofdpa_port->internal_vlan_id = + ofdpa_port_internal_vlan_id_get(ofdpa_port, + ofdpa_port->dev->ifindex); + + switchdev_port_fwd_mark_set(ofdpa_port->dev, ofdpa_port->bridge_dev, + false); + ofdpa_port->bridge_dev = NULL; + + err = ofdpa_port_vlan_add(ofdpa_port, NULL, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + if (ofdpa_port->dev->flags & IFF_UP) + err = ofdpa_port_fwd_enable(ofdpa_port, 0); + + return err; +} + +static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port, + struct net_device *master) +{ + int err; + + ofdpa_port->bridge_dev = master; + + err = ofdpa_port_fwd_disable(ofdpa_port, 0); + if (err) + return err; + err = ofdpa_port_fwd_enable(ofdpa_port, 0); + + return err; +} + +static int ofdpa_port_master_linked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err = 0; + + if (netif_is_bridge_master(master)) + err = ofdpa_port_bridge_join(ofdpa_port, master); + else if (netif_is_ovs_master(master)) + err = ofdpa_port_ovs_changed(ofdpa_port, master); + return err; +} + +static int ofdpa_port_master_unlinked(struct rocker_port *rocker_port, + struct net_device *master) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int err = 0; + + if (ofdpa_port_is_bridged(ofdpa_port)) + err = ofdpa_port_bridge_leave(ofdpa_port); + else if (ofdpa_port_is_ovsed(ofdpa_port)) + err = ofdpa_port_ovs_changed(ofdpa_port, NULL); + return err; +} + +static int ofdpa_port_neigh_update(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = (n->nud_state & NUD_VALID ? 
0 : OFDPA_OP_FLAG_REMOVE) | + OFDPA_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *) n->primary_key; + + return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); +} + +static int ofdpa_port_neigh_destroy(struct rocker_port *rocker_port, + struct neighbour *n) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = OFDPA_OP_FLAG_REMOVE | OFDPA_OP_FLAG_NOWAIT; + __be32 ip_addr = *(__be32 *) n->primary_key; + + return ofdpa_port_ipv4_neigh(ofdpa_port, NULL, flags, ip_addr, n->ha); +} + +static int ofdpa_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, + const unsigned char *addr, + __be16 vlan_id) +{ + struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_LEARNED; + + if (ofdpa_port->stp_state != BR_STATE_LEARNING && + ofdpa_port->stp_state != BR_STATE_FORWARDING) + return 0; + + return ofdpa_port_fdb(ofdpa_port, NULL, addr, vlan_id, flags); +} + struct rocker_world_ops rocker_ofdpa_ops = { .kind = "ofdpa", .priv_size = sizeof(struct ofdpa), .port_priv_size = sizeof(struct ofdpa_port), .mode = ROCKER_PORT_MODE_OF_DPA, + .init = ofdpa_init, + .fini = ofdpa_fini, + .port_pre_init = ofdpa_port_pre_init, + .port_init = ofdpa_port_init, + .port_fini = ofdpa_port_fini, + .port_open = ofdpa_port_open, + .port_stop = ofdpa_port_stop, + .port_attr_stp_state_set = ofdpa_port_attr_stp_state_set, + .port_attr_bridge_flags_set = ofdpa_port_attr_bridge_flags_set, + .port_attr_bridge_flags_get = ofdpa_port_attr_bridge_flags_get, + .port_attr_bridge_ageing_time_set = ofdpa_port_attr_bridge_ageing_time_set, + .port_obj_vlan_add = ofdpa_port_obj_vlan_add, + .port_obj_vlan_del = ofdpa_port_obj_vlan_del, + .port_obj_vlan_dump = ofdpa_port_obj_vlan_dump, + .port_obj_fib4_add = ofdpa_port_obj_fib4_add, + .port_obj_fib4_del = ofdpa_port_obj_fib4_del, + .port_obj_fdb_add = ofdpa_port_obj_fdb_add, + .port_obj_fdb_del = ofdpa_port_obj_fdb_del, + .port_obj_fdb_dump = ofdpa_port_obj_fdb_dump, + .port_master_linked = ofdpa_port_master_linked, + .port_master_unlinked = ofdpa_port_master_unlinked, + .port_neigh_update = ofdpa_port_neigh_update, + .port_neigh_destroy = ofdpa_port_neigh_destroy, + .port_ev_mac_vlan_seen = ofdpa_port_ev_mac_vlan_seen, }; -- cgit v1.2.3 From fccd84d44912a15bea4a2265f5437f18330e7ce1 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Tue, 16 Feb 2016 15:14:51 +0100 Subject: rocker: return -EOPNOTSUPP for undefined world ops Suggested-by: Scott Feldman Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_main.c | 33 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 1efd8b79b029..acafbf870182 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1547,7 +1547,7 @@ static int rocker_world_port_attr_stp_state_set(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_attr_stp_state_set) - return 0; + return -EOPNOTSUPP; return wops->port_attr_stp_state_set(rocker_port, state, trans); } @@ -1559,7 +1559,7 @@ rocker_world_port_attr_bridge_flags_set(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_attr_bridge_flags_set) - return 0; + return -EOPNOTSUPP; return wops->port_attr_bridge_flags_set(rocker_port, brport_flags, trans); } @@ -1571,7 +1571,7 @@ rocker_world_port_attr_bridge_flags_get(const struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_attr_bridge_flags_get) - return 0; + return -EOPNOTSUPP; return wops->port_attr_bridge_flags_get(rocker_port, p_brport_flags); } @@ -1584,7 +1584,7 @@ rocker_world_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_attr_bridge_ageing_time_set) - return 0; + return -EOPNOTSUPP; return wops->port_attr_bridge_ageing_time_set(rocker_port, ageing_time, trans); } @@ -1597,6 +1597,7 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_vlan_add) + return -EOPNOTSUPP; return 0; return wops->port_obj_vlan_add(rocker_port, vlan, trans); } @@ -1608,7 +1609,7 @@ rocker_world_port_obj_vlan_del(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_vlan_del) - return 0; + return -EOPNOTSUPP; return wops->port_obj_vlan_del(rocker_port, vlan); } @@ -1620,7 +1621,7 @@ rocker_world_port_obj_vlan_dump(const struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_vlan_dump) - return 0; + return -EOPNOTSUPP; return wops->port_obj_vlan_dump(rocker_port, vlan, cb); } @@ -1632,7 +1633,7 @@ rocker_world_port_obj_fib4_add(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_fib4_add) - return 0; + return -EOPNOTSUPP; return wops->port_obj_fib4_add(rocker_port, fib4, trans); } @@ -1643,7 +1644,7 @@ rocker_world_port_obj_fib4_del(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_fib4_del) - return 0; + return -EOPNOTSUPP; return wops->port_obj_fib4_del(rocker_port, fib4); } @@ -1655,7 +1656,7 @@ rocker_world_port_obj_fdb_add(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_fdb_add) - return 0; + return -EOPNOTSUPP; return wops->port_obj_fdb_add(rocker_port, fdb, trans); } @@ -1666,7 +1667,7 @@ rocker_world_port_obj_fdb_del(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_fdb_del) - return 0; + return -EOPNOTSUPP; return wops->port_obj_fdb_del(rocker_port, fdb); } @@ -1678,7 +1679,7 @@ rocker_world_port_obj_fdb_dump(const struct 
rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_obj_fdb_dump) - return 0; + return -EOPNOTSUPP; return wops->port_obj_fdb_dump(rocker_port, fdb, cb); } @@ -1688,7 +1689,7 @@ static int rocker_world_port_master_linked(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_master_linked) - return 0; + return -EOPNOTSUPP; return wops->port_master_linked(rocker_port, master); } @@ -1698,7 +1699,7 @@ static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_master_unlinked) - return 0; + return -EOPNOTSUPP; return wops->port_master_unlinked(rocker_port, master); } @@ -1708,7 +1709,7 @@ static int rocker_world_port_neigh_update(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_neigh_update) - return 0; + return -EOPNOTSUPP; return wops->port_neigh_update(rocker_port, n); } @@ -1718,7 +1719,7 @@ static int rocker_world_port_neigh_destroy(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_neigh_destroy) - return 0; + return -EOPNOTSUPP; return wops->port_neigh_destroy(rocker_port, n); } @@ -1729,7 +1730,7 @@ static int rocker_world_port_ev_mac_vlan_seen(struct rocker_port *rocker_port, struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_ev_mac_vlan_seen) - return 0; + return -EOPNOTSUPP; return wops->port_ev_mac_vlan_seen(rocker_port, addr, vlan_id); } -- cgit v1.2.3 From d4ac05ff3697e036dcb0e2e284c5f7eb77cc0966 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:58:57 +0100 Subject: vxlan: introduce vxlan_hdr Currently, pointer to the vxlan header is kept in a local variable. It has to be reloaded whenever the pskb pull operations are performed which usually happens somewhere deep in called functions. Create a vxlan_hdr function and use it to reference the vxlan header instead. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 17 +++++++---------- include/net/vxlan.h | 5 +++++ 2 files changed, 12 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ee1206d9f8df..524e3b139122 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1257,7 +1257,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; struct vxlan_sock *vs; - struct vxlanhdr *vxh; u32 flags, vni; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; @@ -1266,9 +1265,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, VXLAN_HLEN)) goto error; - vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); - flags = ntohl(vxh->vx_flags); - vni = ntohl(vxh->vx_vni); + flags = ntohl(vxlan_hdr(skb)->vx_flags); + vni = ntohl(vxlan_hdr(skb)->vx_vni); if (flags & VXLAN_HF_VNI) { flags &= ~VXLAN_HF_VNI; @@ -1279,16 +1277,14 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) goto drop; - vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1); vs = rcu_dereference_sk_user_data(sk); if (!vs) goto drop; if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { - vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni, - !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL)); - if (!vxh) + if (!vxlan_remcsum(skb, vxlan_hdr(skb), sizeof(struct vxlanhdr), vni, + !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL))) goto drop; flags &= ~VXLAN_HF_RCO; @@ -1313,7 +1309,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) { struct vxlanhdr_gbp *gbp; - gbp = (struct vxlanhdr_gbp *)vxh; + gbp = (struct vxlanhdr_gbp *)vxlan_hdr(skb); md->gbp = ntohs(gbp->policy_id); if (tun_dst) @@ -1351,7 +1347,8 @@ drop: bad_flags: netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", - ntohl(vxh->vx_flags), ntohl(vxh->vx_vni)); + ntohl(vxlan_hdr(skb)->vx_flags), + ntohl(vxlan_hdr(skb)->vx_vni)); error: if (tun_dst) diff --git a/include/net/vxlan.h b/include/net/vxlan.h index b314e4af89c5..3f38b40ec4aa 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -262,6 +262,11 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, /* IPv6 header + UDP + VXLAN + Ethernet header */ #define VXLAN6_HEADROOM (40 + 8 + 8 + 14) +static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) +{ + return (struct vxlanhdr *)(udp_hdr(skb) + 1); +} + #if IS_ENABLED(CONFIG_VXLAN) void vxlan_get_rx_port(struct net_device *netdev); #else -- cgit v1.2.3 From 54bfd872bf16d40b61bd0cd9b769b2fef67dd272 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:58:58 +0100 Subject: vxlan: keep flags and vni in network byte order Prevent repeated conversions from and to network order in the fast path. To achieve this, define all flag constants in big endian order and store VNI as __be32. To prevent confusion between the actual VNI value and the VNI field from the header (which contains additional reserved byte), strictly distinguish between "vni" and "vni_field". Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 115 ++++++++++++++++++++++++++-------------------------- include/net/vxlan.h | 70 +++++++++++++++++++++++++++----- 2 files changed, 116 insertions(+), 69 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 524e3b139122..4e3d3dfe2a0e 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -197,9 +197,9 @@ static int vxlan_nla_put_addr(struct sk_buff *skb, int attr, #endif /* Virtual Network hash table head */ -static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id) +static inline struct hlist_head *vni_head(struct vxlan_sock *vs, __be32 vni) { - return &vs->vni_list[hash_32(id, VNI_HASH_BITS)]; + return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)]; } /* Socket hash table head */ @@ -242,12 +242,12 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, return NULL; } -static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) +static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) { struct vxlan_dev *vxlan; - hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) { - if (vxlan->default_dst.remote_vni == id) + hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { + if (vxlan->default_dst.remote_vni == vni) return vxlan; } @@ -255,7 +255,7 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) } /* Look up VNI in a per net namespace table */ -static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, +static struct vxlan_dev *vxlan_find_vni(struct net *net, __be32 vni, sa_family_t family, __be16 port, u32 flags) { @@ -265,7 +265,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, if (!vs) return NULL; - return vxlan_vs_find_vni(vs, id); + return vxlan_vs_find_vni(vs, vni); } /* Fill in neighbour message in skbuff. 
*/ @@ -315,7 +315,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, nla_put_be16(skb, NDA_PORT, rdst->remote_port)) goto nla_put_failure; if (rdst->remote_vni != vxlan->default_dst.remote_vni && - nla_put_u32(skb, NDA_VNI, rdst->remote_vni)) + nla_put_u32(skb, NDA_VNI, be32_to_cpu(rdst->remote_vni))) goto nla_put_failure; if (rdst->remote_ifindex && nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex)) @@ -383,7 +383,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) }; struct vxlan_rdst remote = { .remote_ip = *ipa, /* goes to NDA_DST */ - .remote_vni = VXLAN_N_VID, + .remote_vni = cpu_to_be32(VXLAN_N_VID), }; vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); @@ -452,7 +452,7 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, /* caller should hold vxlan->hash_lock */ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, union vxlan_addr *ip, __be16 port, - __u32 vni, __u32 ifindex) + __be32 vni, __u32 ifindex) { struct vxlan_rdst *rd; @@ -469,7 +469,8 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f, /* Replace destination of unicast mac */ static int vxlan_fdb_replace(struct vxlan_fdb *f, - union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex) + union vxlan_addr *ip, __be16 port, __be32 vni, + __u32 ifindex) { struct vxlan_rdst *rd; @@ -491,7 +492,7 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f, /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, - union vxlan_addr *ip, __be16 port, __u32 vni, + union vxlan_addr *ip, __be16 port, __be32 vni, __u32 ifindex, struct vxlan_rdst **rdp) { struct vxlan_rdst *rd; @@ -523,7 +524,8 @@ static int vxlan_fdb_append(struct vxlan_fdb *f, static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, unsigned int off, struct vxlanhdr *vh, size_t hdrlen, - u32 data, struct gro_remcsum *grc, + __be32 vni_field, + struct gro_remcsum *grc, bool nopartial) { size_t start, offset; @@ -534,10 +536,8 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, if (!NAPI_GRO_CB(skb)->csum_valid) return NULL; - start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; - offset = start + ((data & VXLAN_RCO_UDP) ? 
- offsetof(struct udphdr, check) : - offsetof(struct tcphdr, check)); + start = vxlan_rco_start(vni_field); + offset = start + vxlan_rco_offset(vni_field); vh = skb_gro_remcsum_process(skb, (void *)vh, off, hdrlen, start, offset, grc, nopartial); @@ -557,7 +557,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, int flush = 1; struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock, udp_offloads); - u32 flags; + __be32 flags; struct gro_remcsum grc; skb_gro_remcsum_init(&grc); @@ -573,11 +573,11 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, skb_gro_postpull_rcsum(skb, vh, sizeof(struct vxlanhdr)); - flags = ntohl(vh->vx_flags); + flags = vh->vx_flags; if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), - ntohl(vh->vx_vni), &grc, + vh->vx_vni, &grc, !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL)); @@ -668,7 +668,7 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs) static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, - __be16 port, __u32 vni, __u32 ifindex, + __be16 port, __be32 vni, __u32 ifindex, __u8 ndm_flags) { struct vxlan_rdst *rd = NULL; @@ -777,7 +777,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) } static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, - union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex) + union vxlan_addr *ip, __be16 *port, __be32 *vni, + u32 *ifindex) { struct net *net = dev_net(vxlan->dev); int err; @@ -810,7 +811,7 @@ static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, if (tb[NDA_VNI]) { if (nla_len(tb[NDA_VNI]) != sizeof(u32)) return -EINVAL; - *vni = nla_get_u32(tb[NDA_VNI]); + *vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); } else { *vni = vxlan->default_dst.remote_vni; } @@ -840,7 +841,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], /* struct net *net = dev_net(vxlan->dev); */ union vxlan_addr ip; __be16 port; - u32 vni, ifindex; + __be32 vni; + u32 ifindex; int err; if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { @@ -877,7 +879,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], struct vxlan_rdst *rd = NULL; union vxlan_addr ip; __be16 port; - u32 vni, ifindex; + __be32 vni; + u32 ifindex; int err; err = vxlan_fdb_parse(tb, vxlan, &ip, &port, &vni, &ifindex); @@ -1133,17 +1136,16 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan) } static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, - size_t hdrlen, u32 data, bool nopartial) + size_t hdrlen, __be32 vni_field, + bool nopartial) { size_t start, offset, plen; if (skb->remcsum_offload) return vh; - start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; - offset = start + ((data & VXLAN_RCO_UDP) ? 
- offsetof(struct udphdr, check) : - offsetof(struct tcphdr, check)); + start = vxlan_rco_start(vni_field); + offset = start + vxlan_rco_offset(vni_field); plen = hdrlen + offset + sizeof(u16); @@ -1159,7 +1161,7 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, } static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, - struct vxlan_metadata *md, u32 vni, + struct vxlan_metadata *md, __be32 vni, struct metadata_dst *tun_dst) { struct iphdr *oip = NULL; @@ -1257,7 +1259,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; struct vxlan_sock *vs; - u32 flags, vni; + __be32 flags, vni_field; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; @@ -1265,8 +1267,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, VXLAN_HLEN)) goto error; - flags = ntohl(vxlan_hdr(skb)->vx_flags); - vni = ntohl(vxlan_hdr(skb)->vx_vni); + flags = vxlan_hdr(skb)->vx_flags; + vni_field = vxlan_hdr(skb)->vx_vni; if (flags & VXLAN_HF_VNI) { flags &= ~VXLAN_HF_VNI; @@ -1283,17 +1285,18 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { - if (!vxlan_remcsum(skb, vxlan_hdr(skb), sizeof(struct vxlanhdr), vni, + if (!vxlan_remcsum(skb, vxlan_hdr(skb), sizeof(struct vxlanhdr), + vni_field, !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL))) goto drop; flags &= ~VXLAN_HF_RCO; - vni &= VXLAN_VNI_MASK; + vni_field &= VXLAN_VNI_MASK; } if (vxlan_collect_metadata(vs)) { tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, - cpu_to_be64(vni >> 8), sizeof(*md)); + vxlan_vni(vni_field), sizeof(*md)); if (!tun_dst) goto drop; @@ -1324,7 +1327,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) flags &= ~VXLAN_GBP_USED_BITS; } - if (flags || vni & ~VXLAN_VNI_MASK) { + if (flags || vni_field & ~VXLAN_VNI_MASK) { /* If there are any unprocessed flags remaining treat * this as a malformed packet. This behavior diverges from * VXLAN RFC (RFC7348) which stipulates that bits in reserved @@ -1337,7 +1340,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto bad_flags; } - vxlan_rcv(vs, skb, md, vni >> 8, tun_dst); + vxlan_rcv(vs, skb, md, vxlan_vni(vni_field), tun_dst); return 0; drop: @@ -1680,7 +1683,7 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags, return; gbp = (struct vxlanhdr_gbp *)vxh; - vxh->vx_flags |= htonl(VXLAN_HF_GBP); + vxh->vx_flags |= VXLAN_HF_GBP; if (md->gbp & VXLAN_GBP_DONT_LEARN) gbp->dont_learn = 1; @@ -1700,7 +1703,6 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, int min_headroom; int err; int type = udp_sum ? 
SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL; - u16 hdrlen = sizeof(struct vxlanhdr); if ((vxflags & VXLAN_F_REMCSUM_TX) && skb->ip_summed == CHECKSUM_PARTIAL) { @@ -1733,18 +1735,15 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst, return PTR_ERR(skb); vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); - vxh->vx_flags = htonl(VXLAN_HF_VNI); - vxh->vx_vni = vni; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(vni); if (type & SKB_GSO_TUNNEL_REMCSUM) { - u32 data = (skb_checksum_start_offset(skb) - hdrlen) >> - VXLAN_RCO_SHIFT; + unsigned int start; - if (skb->csum_offset == offsetof(struct udphdr, check)) - data |= VXLAN_RCO_UDP; - - vxh->vx_vni |= htonl(data); - vxh->vx_flags |= htonl(VXLAN_HF_RCO); + start = skb_checksum_start_offset(skb) - sizeof(struct vxlanhdr); + vxh->vx_vni |= vxlan_compute_rco(start, skb->csum_offset); + vxh->vx_flags |= VXLAN_HF_RCO; if (!skb_is_gso(skb)) { skb->ip_summed = CHECKSUM_NONE; @@ -1892,7 +1891,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; - u32 vni; + __be32 vni; __be16 df = 0; __u8 tos, ttl; int err; @@ -1914,7 +1913,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; } dst_port = info->key.tp_dst ? : vxlan->cfg.dst_port; - vni = be64_to_cpu(info->key.tun_id); + vni = vxlan_tun_id_to_vni(info->key.tun_id); remote_ip.sa.sa_family = ip_tunnel_info_af(info); if (remote_ip.sa.sa_family == AF_INET) remote_ip.sin.sin_addr.s_addr = info->key.u.ipv4.dst; @@ -2007,7 +2006,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), - htonl(vni << 8), md, flags, udp_sum); + vni, md, flags, udp_sum); if (err < 0) goto xmit_tx_error; @@ -2065,7 +2064,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? 
: ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), - htonl(vni << 8), md, flags, udp_sum); + vni, md, flags, udp_sum); if (err < 0) { dst_release(ndst); return; @@ -2222,7 +2221,7 @@ static void vxlan_cleanup(unsigned long arg) static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) { struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); - __u32 vni = vxlan->default_dst.remote_vni; + __be32 vni = vxlan->default_dst.remote_vni; spin_lock(&vn->sock_lock); hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni)); @@ -2837,7 +2836,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, memset(&conf, 0, sizeof(conf)); if (data[IFLA_VXLAN_ID]) - conf.vni = nla_get_u32(data[IFLA_VXLAN_ID]); + conf.vni = cpu_to_be32(nla_get_u32(data[IFLA_VXLAN_ID])); if (data[IFLA_VXLAN_GROUP]) { conf.remote_ip.sin.sin_addr.s_addr = nla_get_in_addr(data[IFLA_VXLAN_GROUP]); @@ -2941,7 +2940,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, break; case -EEXIST: - pr_info("duplicate VNI %u\n", conf.vni); + pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni)); break; } @@ -2999,7 +2998,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) .high = htons(vxlan->cfg.port_max), }; - if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni)) + if (nla_put_u32(skb, IFLA_VXLAN_ID, be32_to_cpu(dst->remote_vni))) goto nla_put_failure; if (!vxlan_addr_any(&dst->remote_ip)) { diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 3f38b40ec4aa..1b85a3b40c5a 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -24,11 +24,11 @@ struct vxlanhdr { }; /* VXLAN header flags. */ -#define VXLAN_HF_VNI BIT(27) +#define VXLAN_HF_VNI cpu_to_be32(BIT(27)) #define VXLAN_N_VID (1u << 24) #define VXLAN_VID_MASK (VXLAN_N_VID - 1) -#define VXLAN_VNI_MASK (VXLAN_VID_MASK << 8) +#define VXLAN_VNI_MASK cpu_to_be32(VXLAN_VID_MASK << 8) #define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr)) #define VNI_HASH_BITS 10 @@ -55,14 +55,14 @@ struct vxlanhdr { */ /* VXLAN-RCO header flags. */ -#define VXLAN_HF_RCO BIT(21) +#define VXLAN_HF_RCO cpu_to_be32(BIT(21)) /* Remote checksum offload header option */ -#define VXLAN_RCO_MASK 0x7f /* Last byte of vni field */ -#define VXLAN_RCO_UDP 0x80 /* Indicate UDP RCO (TCP when not set *) */ -#define VXLAN_RCO_SHIFT 1 /* Left shift of start */ +#define VXLAN_RCO_MASK cpu_to_be32(0x7f) /* Last byte of vni field */ +#define VXLAN_RCO_UDP cpu_to_be32(0x80) /* Indicate UDP RCO (TCP when not set *) */ +#define VXLAN_RCO_SHIFT 1 /* Left shift of start */ #define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1) -#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT) +#define VXLAN_MAX_REMCSUM_START (0x7f << VXLAN_RCO_SHIFT) /* * VXLAN Group Based Policy Extension (VXLAN_F_GBP): @@ -105,9 +105,9 @@ struct vxlanhdr_gbp { }; /* VXLAN-GBP header flags. 
*/ -#define VXLAN_HF_GBP BIT(31) +#define VXLAN_HF_GBP cpu_to_be32(BIT(31)) -#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF) +#define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | cpu_to_be32(0xFFFFFF)) /* skb->mark mapping * @@ -144,7 +144,7 @@ union vxlan_addr { struct vxlan_rdst { union vxlan_addr remote_ip; __be16 remote_port; - u32 remote_vni; + __be32 remote_vni; u32 remote_ifindex; struct list_head list; struct rcu_head rcu; @@ -154,7 +154,7 @@ struct vxlan_rdst { struct vxlan_config { union vxlan_addr remote_ip; union vxlan_addr saddr; - u32 vni; + __be32 vni; int remote_ifindex; int mtu; __be16 dst_port; @@ -267,6 +267,54 @@ static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) return (struct vxlanhdr *)(udp_hdr(skb) + 1); } +static inline __be32 vxlan_vni(__be32 vni_field) +{ +#if defined(__BIG_ENDIAN) + return vni_field >> 8; +#else + return (vni_field & VXLAN_VNI_MASK) << 8; +#endif +} + +static inline __be32 vxlan_vni_field(__be32 vni) +{ +#if defined(__BIG_ENDIAN) + return vni << 8; +#else + return vni >> 8; +#endif +} + +static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id) +{ +#if defined(__BIG_ENDIAN) + return tun_id; +#else + return tun_id >> 32; +#endif +} + +static inline size_t vxlan_rco_start(__be32 vni_field) +{ + return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; +} + +static inline size_t vxlan_rco_offset(__be32 vni_field) +{ + return (vni_field & VXLAN_RCO_UDP) ? + offsetof(struct udphdr, check) : + offsetof(struct tcphdr, check); +} + +static inline __be32 vxlan_compute_rco(unsigned int start, unsigned int offset) +{ + __be32 vni_field = cpu_to_be32(start >> VXLAN_RCO_SHIFT); + + if (offset == offsetof(struct udphdr, check)) + vni_field |= VXLAN_RCO_UDP; + return vni_field; +} + #if IS_ENABLED(CONFIG_VXLAN) void vxlan_get_rx_port(struct net_device *netdev); #else -- cgit v1.2.3 From be5cfeab8f95995d5590ab919b9f4dde19d50ea7 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:58:59 +0100 Subject: vxlan: simplify vxlan_remcsum Part of the parameters is not needed. Simplify the caller of this function in preparation of making vxlan rx more comprehensible. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 4e3d3dfe2a0e..161e39ce3914 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1135,29 +1135,25 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan) return ret; } -static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, - size_t hdrlen, __be32 vni_field, - bool nopartial) +static bool vxlan_remcsum(struct sk_buff *skb, u32 vxflags, __be32 vni_field) { size_t start, offset, plen; if (skb->remcsum_offload) - return vh; + return true; start = vxlan_rco_start(vni_field); offset = start + vxlan_rco_offset(vni_field); - plen = hdrlen + offset + sizeof(u16); + plen = sizeof(struct vxlanhdr) + offset + sizeof(u16); if (!pskb_may_pull(skb, plen)) - return NULL; - - vh = (struct vxlanhdr *)(udp_hdr(skb) + 1); + return false; - skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset, - nopartial); + skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, + !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); - return vh; + return true; } static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, @@ -1285,9 +1281,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { - if (!vxlan_remcsum(skb, vxlan_hdr(skb), sizeof(struct vxlanhdr), - vni_field, - !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL))) + if (!vxlan_remcsum(skb, vs->flags, vni_field)) goto drop; flags &= ~VXLAN_HF_RCO; -- cgit v1.2.3 From 3288af0892e322b019a4c98810959b2a1c6dae7b Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:59:00 +0100 Subject: vxlan: move GBP header parsing to a separate function To make vxlan_udp_encap_recv shorter and more comprehensible. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 161e39ce3914..f65a923c2999 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1156,6 +1156,24 @@ static bool vxlan_remcsum(struct sk_buff *skb, u32 vxflags, __be32 vni_field) return true; } +static void vxlan_parse_gbp_hdr(struct sk_buff *skb, struct vxlan_metadata *md, + struct metadata_dst *tun_dst) +{ + struct vxlanhdr_gbp *gbp; + + gbp = (struct vxlanhdr_gbp *)vxlan_hdr(skb); + md->gbp = ntohs(gbp->policy_id); + + if (tun_dst) + tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; + + if (gbp->dont_learn) + md->gbp |= VXLAN_GBP_DONT_LEARN; + + if (gbp->policy_applied) + md->gbp |= VXLAN_GBP_POLICY_APPLIED; +} + static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, struct vxlan_metadata *md, __be32 vni, struct metadata_dst *tun_dst) @@ -1304,20 +1322,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) * used by VXLAN extensions if explicitly requested. 
*/ if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) { - struct vxlanhdr_gbp *gbp; - - gbp = (struct vxlanhdr_gbp *)vxlan_hdr(skb); - md->gbp = ntohs(gbp->policy_id); - - if (tun_dst) - tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; - - if (gbp->dont_learn) - md->gbp |= VXLAN_GBP_DONT_LEARN; - - if (gbp->policy_applied) - md->gbp |= VXLAN_GBP_POLICY_APPLIED; - + vxlan_parse_gbp_hdr(skb, md, tun_dst); flags &= ~VXLAN_GBP_USED_BITS; } -- cgit v1.2.3 From f14ecebb3a4e83eb6233e0167aa4ba675c99e514 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:59:01 +0100 Subject: vxlan: clean up extension handling on rx Bring the extension handling to a single place and move the actual handling logic out of vxlan_udp_encap_recv as much as possible. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 62 +++++++++++++++++++++++++++-------------------------- 1 file changed, 32 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f65a923c2999..ac688dc75c66 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1135,15 +1135,16 @@ static int vxlan_igmp_leave(struct vxlan_dev *vxlan) return ret; } -static bool vxlan_remcsum(struct sk_buff *skb, u32 vxflags, __be32 vni_field) +static bool vxlan_remcsum(struct vxlanhdr *unparsed, + struct sk_buff *skb, u32 vxflags) { size_t start, offset, plen; - if (skb->remcsum_offload) - return true; + if (!(unparsed->vx_flags & VXLAN_HF_RCO) || skb->remcsum_offload) + goto out; - start = vxlan_rco_start(vni_field); - offset = start + vxlan_rco_offset(vni_field); + start = vxlan_rco_start(unparsed->vx_vni); + offset = start + vxlan_rco_offset(unparsed->vx_vni); plen = sizeof(struct vxlanhdr) + offset + sizeof(u16); @@ -1152,16 +1153,21 @@ static bool vxlan_remcsum(struct sk_buff *skb, u32 vxflags, __be32 vni_field) skb_remcsum_process(skb, (void *)(vxlan_hdr(skb) + 1), start, offset, !!(vxflags & VXLAN_F_REMCSUM_NOPARTIAL)); - +out: + unparsed->vx_flags &= ~VXLAN_HF_RCO; + unparsed->vx_vni &= VXLAN_VNI_MASK; return true; } -static void vxlan_parse_gbp_hdr(struct sk_buff *skb, struct vxlan_metadata *md, +static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, + struct vxlan_metadata *md, struct metadata_dst *tun_dst) { - struct vxlanhdr_gbp *gbp; + struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; + + if (!(unparsed->vx_flags & VXLAN_HF_GBP)) + goto out; - gbp = (struct vxlanhdr_gbp *)vxlan_hdr(skb); md->gbp = ntohs(gbp->policy_id); if (tun_dst) @@ -1172,6 +1178,9 @@ static void vxlan_parse_gbp_hdr(struct sk_buff *skb, struct vxlan_metadata *md, if (gbp->policy_applied) md->gbp |= VXLAN_GBP_POLICY_APPLIED; + +out: + unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, @@ -1273,7 +1282,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; struct vxlan_sock *vs; - __be32 flags, vni_field; + struct vxlanhdr unparsed; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; @@ -1281,11 +1290,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!pskb_may_pull(skb, VXLAN_HLEN)) goto error; - flags = vxlan_hdr(skb)->vx_flags; - vni_field = vxlan_hdr(skb)->vx_vni; - - if (flags & VXLAN_HF_VNI) { - flags &= ~VXLAN_HF_VNI; + unparsed = *vxlan_hdr(skb); + if (unparsed.vx_flags & VXLAN_HF_VNI) { + unparsed.vx_flags &= ~VXLAN_HF_VNI; + unparsed.vx_vni &= ~VXLAN_VNI_MASK; } else { /* VNI flag always 
required to be set */ goto bad_flags; @@ -1298,17 +1306,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!vs) goto drop; - if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { - if (!vxlan_remcsum(skb, vs->flags, vni_field)) - goto drop; - - flags &= ~VXLAN_HF_RCO; - vni_field &= VXLAN_VNI_MASK; - } - if (vxlan_collect_metadata(vs)) { tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, - vxlan_vni(vni_field), sizeof(*md)); + vxlan_vni(vxlan_hdr(skb)->vx_vni), + sizeof(*md)); if (!tun_dst) goto drop; @@ -1321,12 +1322,13 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) /* For backwards compatibility, only allow reserved fields to be * used by VXLAN extensions if explicitly requested. */ - if ((flags & VXLAN_HF_GBP) && (vs->flags & VXLAN_F_GBP)) { - vxlan_parse_gbp_hdr(skb, md, tun_dst); - flags &= ~VXLAN_GBP_USED_BITS; - } + if (vs->flags & VXLAN_F_REMCSUM_RX) + if (!vxlan_remcsum(&unparsed, skb, vs->flags)) + goto drop; + if (vs->flags & VXLAN_F_GBP) + vxlan_parse_gbp_hdr(&unparsed, md, tun_dst); - if (flags || vni_field & ~VXLAN_VNI_MASK) { + if (unparsed.vx_flags || unparsed.vx_vni) { /* If there are any unprocessed flags remaining treat * this as a malformed packet. This behavior diverges from * VXLAN RFC (RFC7348) which stipulates that bits in reserved @@ -1339,7 +1341,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto bad_flags; } - vxlan_rcv(vs, skb, md, vxlan_vni(vni_field), tun_dst); + vxlan_rcv(vs, skb, md, vxlan_vni(vxlan_hdr(skb)->vx_vni), tun_dst); return 0; drop: -- cgit v1.2.3 From 288b01c8c46eda01cd3921c6e3aca6293e57cee8 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:59:02 +0100 Subject: vxlan: clean up rx error path When there are unrecognized flags present in the vxlan header, it doesn't make much sense to return the packet for further UDP processing, especially considering that for other invalid flag combinations we drop the packet because of previous checks. This means we return positive value only at the beginning of the function where tun_dst is not yet allocated. This allows us to get rid of the bad_flags and error jump labels. When we're dropping packet, we need to free tun_dst now. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index ac688dc75c66..dfbb3cbd14b5 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1288,16 +1288,19 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) - goto error; + return 1; unparsed = *vxlan_hdr(skb); - if (unparsed.vx_flags & VXLAN_HF_VNI) { - unparsed.vx_flags &= ~VXLAN_HF_VNI; - unparsed.vx_vni &= ~VXLAN_VNI_MASK; - } else { - /* VNI flag always required to be set */ - goto bad_flags; + /* VNI flag always required to be set */ + if (!(unparsed.vx_flags & VXLAN_HF_VNI)) { + netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", + ntohl(vxlan_hdr(skb)->vx_flags), + ntohl(vxlan_hdr(skb)->vx_vni)); + /* Return non vxlan pkt */ + return 1; } + unparsed.vx_flags &= ~VXLAN_HF_VNI; + unparsed.vx_vni &= ~VXLAN_VNI_MASK; if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) goto drop; @@ -1337,29 +1340,19 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) * is more robust and provides a little more security in * adding extensions to VXLAN. */ - - goto bad_flags; + goto drop; } vxlan_rcv(vs, skb, md, vxlan_vni(vxlan_hdr(skb)->vx_vni), tun_dst); return 0; drop: - /* Consume bad packet */ - kfree_skb(skb); - return 0; - -bad_flags: - netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n", - ntohl(vxlan_hdr(skb)->vx_flags), - ntohl(vxlan_hdr(skb)->vx_vni)); - -error: if (tun_dst) dst_release((struct dst_entry *)tun_dst); - /* Return non vxlan pkt */ - return 1; + /* Consume bad packet */ + kfree_skb(skb); + return 0; } static int arp_reduce(struct net_device *dev, struct sk_buff *skb) -- cgit v1.2.3 From b9167b2e775999d267874b5a1a3a79020f54753a Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 16 Feb 2016 21:59:03 +0100 Subject: vxlan: treat vni in metadata based tunnels consistently For metadata based tunnels, VNI is ignored when doing vxlan device lookups (because such tunnel receives all VNIs). However, this was not honored by vxlan_xmit_one when doing encapsulation bypass. Move the check for metadata based tunnel to the common place where it belongs. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index dfbb3cbd14b5..3a84680b5117 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -246,6 +246,10 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, __be32 vni) { struct vxlan_dev *vxlan; + /* For flow based devices, map all packets to VNI 0 */ + if (vs->flags & VXLAN_F_COLLECT_METADATA) + vni = 0; + hlist_for_each_entry_rcu(vxlan, vni_head(vs, vni), hlist) { if (vxlan->default_dst.remote_vni == vni) return vxlan; @@ -1194,10 +1198,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, union vxlan_addr saddr; int err = 0; - /* For flow based devices, map all packets to VNI 0 */ - if (vs->flags & VXLAN_F_COLLECT_METADATA) - vni = 0; - /* Is this VNI defined? */ vxlan = vxlan_vs_find_vni(vs, vni); if (!vxlan) -- cgit v1.2.3 From d6b3bca11947888bc79e343be931b502040ab6a9 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 15 Jan 2016 14:33:08 -0800 Subject: i40e: get rid of magic number We have a define for this, use it. 
No functional change. Change-ID: Ic0e3ea4f562e46de63b2a8de07f291ccc10205fd Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 3e0d87e3ff3a..597f2092b597 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -461,7 +461,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id, rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT; /* set splitalways mode 10b */ - rx_ctx.dtype = 0x2; + rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT; } /* databuffer length validation */ -- cgit v1.2.3 From cb5c260e330054591294a9924c283ef1ddcd74a4 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 15 Jan 2016 14:33:09 -0800 Subject: i40e: drop unused debugfs file "dump" There was a completely unused file "dump" in debugfs that never panned out to be useful. Change-ID: I12bb9e37b5a83299725dda815a8746157baf6562 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 250 ------------------------- 1 file changed, 250 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 34da53bfb21b..631c2d6f8e7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -71,248 +71,6 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) return NULL; } -/************************************************************** - * dump - * The dump entry in debugfs is for getting a data snapshow of - * the driver's current configuration and runtime details. - * When the filesystem entry is written, a snapshot is taken. - * When the entry is read, the most recent snapshot data is dumped. - **************************************************************/ -static char *i40e_dbg_dump_buf; -static ssize_t i40e_dbg_dump_data_len; -static ssize_t i40e_dbg_dump_buffer_len; - -/** - * i40e_dbg_dump_read - read the dump data - * @filp: the opened file - * @buffer: where to write the data for the user to read - * @count: the size of the user's buffer - * @ppos: file position offset - **/ -static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer, - size_t count, loff_t *ppos) -{ - int bytes_not_copied; - int len; - - /* is *ppos bigger than the available data? 
*/ - if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf) - return 0; - - /* be sure to not read beyond the end of available data */ - len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos)); - - bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len); - if (bytes_not_copied) - return -EFAULT; - - *ppos += len; - return len; -} - -/** - * i40e_dbg_prep_dump_buf - * @pf: the PF we're working with - * @buflen: the desired buffer length - * - * Return positive if success, 0 if failed - **/ -static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) -{ - /* if not already big enough, prep for re alloc */ - if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) { - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buffer_len = 0; - i40e_dbg_dump_buf = NULL; - } - - /* get a new buffer if needed */ - if (!i40e_dbg_dump_buf) { - i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL); - if (i40e_dbg_dump_buf != NULL) - i40e_dbg_dump_buffer_len = buflen; - } - - return i40e_dbg_dump_buffer_len; -} - -/** - * i40e_dbg_dump_write - trigger a datadump snapshot - * @filp: the opened file - * @buffer: where to find the user's data - * @count: the length of the user's data - * @ppos: file position offset - * - * Any write clears the stats - **/ -static ssize_t i40e_dbg_dump_write(struct file *filp, - const char __user *buffer, - size_t count, loff_t *ppos) -{ - struct i40e_pf *pf = filp->private_data; - bool seid_found = false; - long seid = -1; - int buflen = 0; - int i, ret; - int len; - u8 *p; - - /* don't allow partial writes */ - if (*ppos != 0) - return 0; - - /* decode the SEID given to be dumped */ - ret = kstrtol_from_user(buffer, count, 0, &seid); - - if (ret) { - dev_info(&pf->pdev->dev, "bad seid value\n"); - } else if (seid == 0) { - seid_found = true; - - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buffer_len = 0; - i40e_dbg_dump_data_len = 0; - i40e_dbg_dump_buf = NULL; - dev_info(&pf->pdev->dev, "debug buffer freed\n"); - - } else if (seid == pf->pf_seid || seid == 1) { - seid_found = true; - - buflen = sizeof(struct i40e_pf); - buflen += (sizeof(struct i40e_aq_desc) - * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); - - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - p = i40e_dbg_dump_buf; - - /* avoid use of memcpy here due to sparse warning - * about copy size. 
- */ - *((struct i40e_pf *)p) = *pf; - p += sizeof(struct i40e_pf); - - len = (sizeof(struct i40e_aq_desc) - * pf->hw.aq.num_asq_entries); - memcpy(p, pf->hw.aq.asq.desc_buf.va, len); - p += len; - - len = (sizeof(struct i40e_aq_desc) - * pf->hw.aq.num_arq_entries); - memcpy(p, pf->hw.aq.arq.desc_buf.va, len); - p += len; - - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "PF seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - } else if (seid >= I40E_BASE_VSI_SEID) { - struct i40e_vsi *vsi = NULL; - struct i40e_mac_filter *f; - int filter_count = 0; - - mutex_lock(&pf->switch_mutex); - vsi = i40e_dbg_find_vsi(pf, seid); - if (!vsi) { - mutex_unlock(&pf->switch_mutex); - goto write_exit; - } - - buflen = sizeof(struct i40e_vsi); - buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors; - buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs; - buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs; - buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs; - list_for_each_entry(f, &vsi->mac_filter_list, list) - filter_count++; - buflen += sizeof(struct i40e_mac_filter) * filter_count; - - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - p = i40e_dbg_dump_buf; - seid_found = true; - - len = sizeof(struct i40e_vsi); - memcpy(p, vsi, len); - p += len; - - if (vsi->num_q_vectors) { - len = (sizeof(struct i40e_q_vector) - * vsi->num_q_vectors); - memcpy(p, vsi->q_vectors, len); - p += len; - } - - if (vsi->num_queue_pairs) { - len = (sizeof(struct i40e_ring) * - vsi->num_queue_pairs); - memcpy(p, vsi->tx_rings, len); - p += len; - memcpy(p, vsi->rx_rings, len); - p += len; - } - - if (vsi->tx_rings[0]) { - len = sizeof(struct i40e_tx_buffer); - for (i = 0; i < vsi->num_queue_pairs; i++) { - memcpy(p, vsi->tx_rings[i]->tx_bi, len); - p += len; - } - len = sizeof(struct i40e_rx_buffer); - for (i = 0; i < vsi->num_queue_pairs; i++) { - memcpy(p, vsi->rx_rings[i]->rx_bi, len); - p += len; - } - } - - /* macvlan filter list */ - len = sizeof(struct i40e_mac_filter); - list_for_each_entry(f, &vsi->mac_filter_list, list) { - memcpy(p, f, len); - p += len; - } - - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "VSI seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - mutex_unlock(&pf->switch_mutex); - } else if (seid >= I40E_BASE_VEB_SEID) { - struct i40e_veb *veb = NULL; - - mutex_lock(&pf->switch_mutex); - veb = i40e_dbg_find_veb(pf, seid); - if (!veb) { - mutex_unlock(&pf->switch_mutex); - goto write_exit; - } - - buflen = sizeof(struct i40e_veb); - if (i40e_dbg_prep_dump_buf(pf, buflen)) { - seid_found = true; - memcpy(i40e_dbg_dump_buf, veb, buflen); - i40e_dbg_dump_data_len = buflen; - dev_info(&pf->pdev->dev, - "VEB seid %ld dumped %d bytes\n", - seid, (int)i40e_dbg_dump_data_len); - } - mutex_unlock(&pf->switch_mutex); - } - -write_exit: - if (!seid_found) - dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); - - return count; -} - -static const struct file_operations i40e_dbg_dump_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = i40e_dbg_dump_read, - .write = i40e_dbg_dump_write, -}; - /************************************************************** * command * The command entry in debugfs is for giving the driver commands @@ -2217,11 +1975,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf) if (!pfile) goto create_failed; - pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, - &i40e_dbg_dump_fops); - if (!pfile) - goto create_failed; - pfile = debugfs_create_file("netdev_ops", 
0600, pf->i40e_dbg_pf, pf, &i40e_dbg_netdev_ops_fops); if (!pfile) @@ -2242,9 +1995,6 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf) { debugfs_remove_recursive(pf->i40e_dbg_pf); pf->i40e_dbg_pf = NULL; - - kfree(i40e_dbg_dump_buf); - i40e_dbg_dump_buf = NULL; } /** -- cgit v1.2.3 From 00e5ec4bcc4aa3d4663502b5377e17ea43e058d6 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 15 Jan 2016 14:33:10 -0800 Subject: i40evf: support packet split receive Support packet split receive on VFs. This is off by default but can be enabled using ethtool private flags. Because we need to trigger a reset from outside of i40evf_main.c, create a new function to do so, and export it. Also update copyright year in file headers. Change-ID: I721aa5d70113d3d6d94102e5f31526f6fc57cbbb Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf.h | 6 +- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 67 +++++++++++++++++++++- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 64 +++++++++++---------- .../net/ethernet/intel/i40evf/i40evf_virtchnl.c | 4 ++ 4 files changed, 110 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 9e15f68d9ddd..e657eccd232c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -274,6 +274,9 @@ struct i40evf_adapter { }; +/* Ethtool Private Flags */ +#define I40EVF_PRIV_FLAGS_PS BIT(0) + /* needed by i40evf_ethtool.c */ extern char i40evf_driver_name[]; extern const char i40evf_driver_version[]; @@ -281,6 +284,7 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); int i40evf_process_config(struct i40evf_adapter *adapter); +void i40evf_schedule_reset(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index bd1c2728bc5c..dd4430aae7fa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2015 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -63,6 +63,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = { #define I40EVF_STATS_LEN(_dev) \ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev)) +static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = { + "packet-split", +}; + +#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings) + /** * i40evf_get_settings - Get Link Speed and Duplex settings * @netdev: network interface device structure @@ -97,6 +103,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset) { if (sset == ETH_SS_STATS) return I40EVF_STATS_LEN(netdev); + else if (sset == ETH_SS_PRIV_FLAGS) + return I40EVF_PRIV_FLAGS_STR_LEN; else return -EINVAL; } @@ -162,6 +170,12 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data) snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i); p += ETH_GSTRING_LEN; } + } else if (sset == ETH_SS_PRIV_FLAGS) { + for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) { + memcpy(data, i40evf_priv_flags_strings[i], + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } } } @@ -211,6 +225,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev, strlcpy(drvinfo->version, i40evf_driver_version, 32); strlcpy(drvinfo->fw_version, "N/A", 4); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); + drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN; } /** @@ -710,6 +725,54 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir, I40EVF_HLUT_ARRAY_SIZE); } +/** + * i40evf_get_priv_flags - report device private flags + * @dev: network interface device structure + * + * The get string set count and the string set should be matched for each + * flag returned. Add new strings for each flag to the i40e_priv_flags_strings + * array. + * + * Returns a u32 bitmap of flags. + **/ +static u32 i40evf_get_priv_flags(struct net_device *dev) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + u32 ret_flags = 0; + + ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ? 
+ I40EVF_PRIV_FLAGS_PS : 0; + + return ret_flags; +} + +/** + * i40evf_set_priv_flags - set private flags + * @dev: network interface device structure + * @flags: bit flags to be set + **/ +static int i40evf_set_priv_flags(struct net_device *dev, u32 flags) +{ + struct i40evf_adapter *adapter = netdev_priv(dev); + bool reset_required = false; + + if ((flags & I40EVF_PRIV_FLAGS_PS) && + !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { + adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; + reset_required = true; + } else if (!(flags & I40EVF_PRIV_FLAGS_PS) && + (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) { + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; + reset_required = true; + } + + /* if needed, issue reset to cause things to take effect */ + if (reset_required) + i40evf_schedule_reset(adapter); + + return 0; +} + static const struct ethtool_ops i40evf_ethtool_ops = { .get_settings = i40evf_get_settings, .get_drvinfo = i40evf_get_drvinfo, @@ -719,6 +782,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = { .get_strings = i40evf_get_strings, .get_ethtool_stats = i40evf_get_ethtool_stats, .get_sset_count = i40evf_get_sset_count, + .get_priv_flags = i40evf_get_priv_flags, + .set_priv_flags = i40evf_set_priv_flags, .get_msglevel = i40evf_get_msglevel, .set_msglevel = i40evf_set_msglevel, .get_coalesce = i40evf_get_coalesce, diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1d81d57c8266..1176326cfb01 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -172,6 +172,19 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...) pr_info("%s", buf); } +/** + * i40evf_schedule_reset - Set the flags and schedule a reset event + * @adapter: board private structure + **/ +void i40evf_schedule_reset(struct i40evf_adapter *adapter) +{ + if (!(adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } +} + /** * i40evf_tx_timeout - Respond to a Tx Hang * @netdev: network interface device structure @@ -181,11 +194,7 @@ static void i40evf_tx_timeout(struct net_device *netdev) struct i40evf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | - I40EVF_FLAG_RESET_NEEDED))) { - adapter->flags |= I40EVF_FLAG_RESET_NEEDED; - queue_work(i40evf_wq, &adapter->reset_task); - } + i40evf_schedule_reset(adapter); } /** @@ -638,35 +647,22 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) int rx_buf_len; - adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE; - adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; - - /* Decide whether to use packet split mode or not */ - if (netdev->mtu > ETH_DATA_LEN) { - if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE) - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - else - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - } else { - if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE) - adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; - else - adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED; - } - /* Set the RX buffer length according to the mode */ - if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { - rx_buf_len = I40E_RX_HDR_SIZE; - } else { - if (netdev->mtu <= ETH_DATA_LEN) - rx_buf_len = I40EVF_RXBUFFER_2048; - else - rx_buf_len = ALIGN(max_frame, 1024); - } + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED || + netdev->mtu <= ETH_DATA_LEN) + rx_buf_len = I40EVF_RXBUFFER_2048; + 
else + rx_buf_len = ALIGN(max_frame, 1024); for (i = 0; i < adapter->num_active_queues; i++) { adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i].rx_buf_len = rx_buf_len; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + set_ring_ps_enabled(&adapter->rx_rings[i]); + adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE; + } else { + clear_ring_ps_enabled(&adapter->rx_rings[i]); + } } } @@ -1003,7 +999,12 @@ static void i40evf_configure(struct i40evf_adapter *adapter) for (i = 0; i < adapter->num_active_queues; i++) { struct i40e_ring *ring = &adapter->rx_rings[i]; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + i40evf_alloc_rx_headers(ring); + i40evf_alloc_rx_buffers_ps(ring, ring->count); + } else { i40evf_alloc_rx_buffers_1buf(ring, ring->count); + } ring->next_to_use = ring->count - 1; writel(ring->next_to_use, ring->tail); } @@ -2481,6 +2482,11 @@ static void i40evf_init_task(struct work_struct *work) adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED; + adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE; + adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE; + + /* Default to single buffer rx, can be changed through ethtool. */ + adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED; netdev->netdev_ops = &i40evf_netdev_ops; i40evf_set_ethtool_ops(netdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c index d3739cc5b608..488e738f76c6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c @@ -270,6 +270,10 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter) vqpi->rxq.max_pkt_size = adapter->netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; + if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) { + vqpi->rxq.splithdr_enabled = true; + vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE; + } vqpi++; } -- cgit v1.2.3 From f734dfff2f2bc2d92c392fbcfe2039893a8cdd08 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Fri, 15 Jan 2016 14:33:11 -0800 Subject: i40e: trivial: cleanup use of pf->hw This patch makes use of a pointer called hw consistent in the i40e_remove function. 
Change-ID: Idacc7ff0a09a68289c57457a78618bf5497de077 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8d41c6c26850..161603b930c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11219,8 +11219,8 @@ static void i40e_remove(struct pci_dev *pdev) i40e_vsi_release(pf->vsi[pf->lan_vsi]); /* shutdown and destroy the HMC */ - if (pf->hw.hmc.hmc_obj) { - ret_code = i40e_shutdown_lan_hmc(&pf->hw); + if (hw->hmc.hmc_obj) { + ret_code = i40e_shutdown_lan_hmc(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the HMC resources: %d\n", @@ -11228,7 +11228,7 @@ static void i40e_remove(struct pci_dev *pdev) } /* shutdown the adminq */ - ret_code = i40e_shutdown_adminq(&pf->hw); + ret_code = i40e_shutdown_adminq(hw); if (ret_code) dev_warn(&pdev->dev, "Failed to destroy the Admin Queue resources: %d\n", @@ -11256,7 +11256,7 @@ static void i40e_remove(struct pci_dev *pdev) kfree(pf->qp_pile); kfree(pf->vsi); - iounmap(pf->hw.hw_addr); + iounmap(hw->hw_addr); kfree(pf); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); -- cgit v1.2.3 From dd353109e41c1e92e0cea9954404a6f5a7d46218 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 15 Jan 2016 14:33:12 -0800 Subject: i40e: Add a SW workaround for lost interrupts This patch adds a workaround for cases where we might have interrupts that got lost but WB happened. If that happens without this patch we will see a tx_timeout. To work around it, this patch goes ahead and reschedules NAPI in that situation, if NAPI is not already scheduled. We also add a counter in ethtool to keep track of when we detect a case of tx_lost_interrupt. Note: napi_reschedule() can be safely called from process/service_task context and is done in other drivers as well without an issue. 
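As a condensed illustration of the check described above (a hypothetical sketch only, not the literal patch, which follows in the diff below; it reuses the new in_sw argument of i40e_get_tx_pending() and the tx_lost_interrupt counter introduced here, and assumes the surrounding hung-queue detection context in the service task rather than reproducing it):

    /* HW head write-back says nothing is pending, but SW still has
     * descriptors to clean and the interrupt that should have driven
     * NAPI never arrived: reschedule NAPI by hand and count the event.
     */
    u32 tx_pending_hw = i40e_get_tx_pending(tx_ring, false);

    if (!tx_pending_hw && i40e_get_tx_pending(tx_ring, true) &&
        !(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) {
        if (napi_reschedule(&tx_ring->q_vector->napi))
            tx_ring->tx_stats.tx_lost_interrupt++;
    }
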
Change-ID: I00f98f1ce3774524d9421227652bef20fcbd0d20 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 32 ++++++++++++++++---------- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 10 +++++--- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 3 ++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 10 +++++--- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 3 ++- 7 files changed, 40 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5ea431d04e8c..5d8e159e18d7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -512,6 +512,7 @@ struct i40e_vsi { u32 tx_busy; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; u32 rx_buf_failed; u32 rx_page_failed; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 230fa402d2da..5778f9e2f43c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -89,6 +89,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = { I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol), I40E_VSI_STAT("tx_linearize", tx_linearize), I40E_VSI_STAT("tx_force_wb", tx_force_wb), + I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt), I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed), I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed), }; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 161603b930c9..aa1c7ca59d53 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -819,6 +819,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) struct i40e_eth_stats *oes; struct i40e_eth_stats *es; /* device's eth stats */ u32 tx_restart, tx_busy; + u64 tx_lost_interrupt; struct i40e_ring *p; u32 rx_page, rx_buf; u64 bytes, packets; @@ -844,6 +845,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rx_b = rx_p = 0; tx_b = tx_p = 0; tx_restart = tx_busy = tx_linearize = tx_force_wb = 0; + tx_lost_interrupt = 0; rx_page = 0; rx_buf = 0; rcu_read_lock(); @@ -862,6 +864,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) tx_busy += p->tx_stats.tx_busy; tx_linearize += p->tx_stats.tx_linearize; tx_force_wb += p->tx_stats.tx_force_wb; + tx_lost_interrupt += p->tx_stats.tx_lost_interrupt; /* Rx queue is part of the same block as Tx queue */ p = &p[1]; @@ -880,6 +883,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) vsi->tx_busy = tx_busy; vsi->tx_linearize = tx_linearize; vsi->tx_force_wb = tx_force_wb; + vsi->tx_lost_interrupt = tx_lost_interrupt; vsi->rx_page_failed = rx_page; vsi->rx_buf_failed = rx_buf; @@ -4349,7 +4353,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) { struct i40e_ring *tx_ring = NULL; struct i40e_pf *pf; - u32 head, val, tx_pending; + u32 head, val, tx_pending_hw; int i; pf = vsi->back; @@ -4375,16 +4379,9 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) else val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0); - /* Bail out if interrupts are disabled because napi_poll - * execution in-progress or will get scheduled soon. - * napi_poll cleans TX and RX queues and updates 'next_to_clean'. 
- */ - if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)) - return; - head = i40e_get_head(tx_ring); - tx_pending = i40e_get_tx_pending(tx_ring); + tx_pending_hw = i40e_get_tx_pending(tx_ring, false); /* HW is done executing descriptors, updated HEAD write back, * but SW hasn't processed those descriptors. If interrupt is @@ -4392,12 +4389,12 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) * dev_watchdog detecting timeout on those netdev_queue, * hence proactively trigger SW interrupt. */ - if (tx_pending) { + if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { /* NAPI Poll didn't run and clear since it was set */ if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT, &tx_ring->q_vector->hung_detected)) { - netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", - vsi->seid, q_idx, tx_pending, + netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n", + vsi->seid, q_idx, tx_pending_hw, tx_ring->next_to_clean, head, tx_ring->next_to_use, readl(tx_ring->tail)); @@ -4410,6 +4407,17 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi) &tx_ring->q_vector->hung_detected); } } + + /* This is the case where we have interrupts missing, + * so the tx_pending in HW will most likely be 0, but we + * will have tx_pending in SW since the WB happened but the + * interrupt got lost. + */ + if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) && + (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) { + if (napi_reschedule(&tx_ring->q_vector->napi)) + tx_ring->tx_stats.tx_lost_interrupt++; + } } /** diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0ffa9a89986c..5b43585704fe 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -610,15 +610,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) /** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40e_get_tx_pending(struct i40e_ring *ring) +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); + if (!in_sw) + head = i40e_get_head(ring); + else + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -741,7 +745,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. 
*/ - j = i40e_get_tx_pending(tx_ring); + j = i40e_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j != 0) && diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index ae22c4e9162f..3acc9244134d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -203,6 +203,7 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { @@ -338,7 +339,7 @@ int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags); #endif void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40e_get_tx_pending(struct i40e_ring *ring); +u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 76bad75b0f67..0f73a4f2e9ed 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -129,15 +129,19 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) /** * i40evf_get_tx_pending - how many Tx descriptors not processed * @tx_ring: the ring of descriptors + * @in_sw: is tx_pending being checked in SW or HW * * Since there is no access to the ring head register * in XL710, we need to use our local copies **/ -u32 i40evf_get_tx_pending(struct i40e_ring *ring) +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw) { u32 head, tail; - head = i40e_get_head(ring); + if (!in_sw) + head = i40e_get_head(ring); + else + head = ring->next_to_clean; tail = readl(ring->tail); if (head != tail) @@ -259,7 +263,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) * them to be written back in case we stay in NAPI. * In this mode on X722 we do not enable Interrupt. */ - j = i40evf_get_tx_pending(tx_ring); + j = i40evf_get_tx_pending(tx_ring, false); if (budget && ((j / (WB_STRIDE + 1)) == 0) && (j > 0) && diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 5467fcdf7670..81c96619287b 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -202,6 +202,7 @@ struct i40e_tx_queue_stats { u64 tx_done_old; u64 tx_linearize; u64 tx_force_wb; + u64 tx_lost_interrupt; }; struct i40e_rx_queue_stats { @@ -326,7 +327,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring); void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); -u32 i40evf_get_tx_pending(struct i40e_ring *ring); +u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); /** * i40e_get_head - Retrieve head from head writeback -- cgit v1.2.3 From 6784ed5a7385f15bc34903abffaca3db0f4e0438 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 15 Jan 2016 14:33:13 -0800 Subject: i40e: Fix PROMISC mode for Multi-function per port (MFP) devices This patch falls back to enabling unicast, multicast and broadcast promiscuous mode when the driver must disable its use of "default port" aka defport mode (which is normally used to provide a promiscuous mode), due to internal incompatibility with Multiple Function per Port (aka MFP).
The situation that requires this patch is when Physical Function 0 is the device being used, and it can support SR-IOV when MFP is enabled, via the driver creating a VEB on an MFP enabled adapter. Change-ID: Ie90b00d0d58782a5dfcf2c3c9725a2eb90bd63d8 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index aa1c7ca59d53..41d6d55416ca 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2122,7 +2122,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state)); - if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) { + if ((vsi->type == I40E_VSI_MAIN) && + (pf->lan_veb != I40E_NO_VEB) && + !(pf->flags & I40E_FLAG_MFP_ENABLED)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN * promisc behavior but will not get VF or VMDq traffic -- cgit v1.2.3 From 4147e2c5eec037dc51e4767b0ed42b32c6fecf9d Mon Sep 17 00:00:00 2001 From: Kiran Patil Date: Fri, 15 Jan 2016 14:33:14 -0800 Subject: i40e: Removal of code which relies on BASE VEB SEID Fixed mapping of SEID is removed from specification. Hence this patch removes code which was using hard coded base VEB SEID. Changed FCoE code to use "hw->pf_id" to obtain correct "idx" and verified. Removed defines for BASE VSI/VEB SEID and BASE_PF_SEID since it is not used anymore. Change-ID: Id507cf4b1fae1c0145e3f08ae9ea5846ea5840de Signed-off-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 16 +++------------- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- 3 files changed, 4 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5d8e159e18d7..e0758ddd2e22 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -64,9 +64,6 @@ #include "i40e_dcb.h" /* Useful i40e defaults */ -#define I40E_BASE_PF_SEID 16 -#define I40E_BASE_VSI_SEID 512 -#define I40E_BASE_VEB_SEID 288 #define I40E_MAX_VEB 16 #define I40E_MAX_NUM_DESCRIPTORS 4096 diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 631c2d6f8e7c..2a44f2e25a26 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -61,13 +61,9 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) { int i; - if ((seid < I40E_BASE_VEB_SEID) || - (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB))) - dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - else - for (i = 0; i < I40E_MAX_VEB; i++) - if (pf->veb[i] && pf->veb[i]->seid == seid) - return pf->veb[i]; + for (i = 0; i < I40E_MAX_VEB; i++) + if (pf->veb[i] && pf->veb[i]->seid == seid) + return pf->veb[i]; return NULL; } @@ -691,12 +687,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) { struct i40e_veb *veb; - if ((seid < I40E_BASE_VEB_SEID) || - (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) { - dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); - return; - } - veb = 
i40e_dbg_find_veb(pf, seid); if (!veb) { dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 41d6d55416ca..adb2a04930d6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -768,7 +768,7 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi) if (vsi->type != I40E_VSI_FCOE) return; - idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; + idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET; fs = &vsi->fcoe_stats; ofs = &vsi->fcoe_stats_offsets; -- cgit v1.2.3 From 16fd08b85962f6288fe1191856aa98cb0d40aa30 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 15 Jan 2016 14:33:15 -0800 Subject: i40e/i40evf: avoid atomics In the case where we have a page fully used by receive data, we need to release the page fully to the stack. Instead of calling get_page (which increments the page count) followed by free_page (which decrements the page count), just donate our reference to the stack. Although this donation is not tax deductible, it does allow us to avoid two very expensive atomic operations that reverse each other. Change-ID: If70739792d5748995fc175ec92ac2171ed4ad8fc Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 21 +++++++++++++-------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 21 +++++++++++++-------- 2 files changed, 26 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5b43585704fe..25e378c32375 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1663,28 +1663,33 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) rx_bi->page_offset + copysize, rx_packet_len, I40E_RXBUFFER_2048); - get_page(rx_bi->page); - /* switch to the other half-page here; the allocation - * code programs the right addr into HW. If we haven't - * used this half-page, the address won't be changed, - * and HW can just use it next time through. - */ - rx_bi->page_offset ^= PAGE_SIZE / 2; /* If the page count is more than 2, then both halves * of the page are used and we need to free it. Do it * here instead of in the alloc code. Otherwise one * of the half-pages might be released between now and * then, and we wouldn't know which one to use. + * Don't call get_page and free_page since those are + * both expensive atomic operations that just change + * the refcount in opposite directions. Just give the + * page to the stack; he can have our refcount. */ if (page_count(rx_bi->page) > 2) { dma_unmap_page(rx_ring->dev, rx_bi->page_dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_page(rx_bi->page); rx_bi->page = NULL; rx_bi->page_dma = 0; rx_ring->rx_stats.realloc_count++; + } else { + get_page(rx_bi->page); + /* switch to the other half-page here; the + * allocation code programs the right addr + * into HW. If we haven't used this half-page, + * the address won't be changed, and HW can + * just use it next time through. 
+ */ + rx_bi->page_offset ^= PAGE_SIZE / 2; } } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 0f73a4f2e9ed..fb6cd7e5d3be 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1126,28 +1126,33 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget) rx_bi->page_offset + copysize, rx_packet_len, I40E_RXBUFFER_2048); - get_page(rx_bi->page); - /* switch to the other half-page here; the allocation - * code programs the right addr into HW. If we haven't - * used this half-page, the address won't be changed, - * and HW can just use it next time through. - */ - rx_bi->page_offset ^= PAGE_SIZE / 2; /* If the page count is more than 2, then both halves * of the page are used and we need to free it. Do it * here instead of in the alloc code. Otherwise one * of the half-pages might be released between now and * then, and we wouldn't know which one to use. + * Don't call get_page and free_page since those are + * both expensive atomic operations that just change + * the refcount in opposite directions. Just give the + * page to the stack; he can have our refcount. */ if (page_count(rx_bi->page) > 2) { dma_unmap_page(rx_ring->dev, rx_bi->page_dma, PAGE_SIZE, DMA_FROM_DEVICE); - __free_page(rx_bi->page); rx_bi->page = NULL; rx_bi->page_dma = 0; rx_ring->rx_stats.realloc_count++; + } else { + get_page(rx_bi->page); + /* switch to the other half-page here; the + * allocation code programs the right addr + * into HW. If we haven't used this half-page, + * the address won't be changed, and HW can + * just use it next time through. + */ + rx_bi->page_offset ^= PAGE_SIZE / 2; } } -- cgit v1.2.3 From a16ae2d59c290b0c545edb9bf41ac9cf88352ee0 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 15 Jan 2016 14:33:16 -0800 Subject: i40e: Do not disable queues in the Legacy/MSI Interrupt handler The queues should never be enabled/disabled in the interrupt handler, ICR0 interrupt enable should be the only thing that needs to be dynamically changed in the handler. This patch fixes that. Without this patch X722 platforms were seeing weird ping timings when in Legacy mode since it takes a whole lot of time for the HW/FW to re-enable queues. 
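Put differently, after this change the legacy/MSI branch of the interrupt handler reduces to roughly the shape sketched below (a simplified, hypothetical outline of the intent, not the full i40e_intr() shown in the diff that follows): the queue cause enables in I40E_QINT_RQCTL(0)/I40E_QINT_TQCTL(0) are no longer touched and only NAPI is scheduled:

    /* no read-modify-write of the queue cause registers here; hand the
     * work to NAPI and let ICR0 be re-enabled on the normal path after
     * polling completes
     */
    if (!test_bit(__I40E_DOWN, &pf->state))
        napi_schedule_irqoff(&q_vector->napi);
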
Change-ID: If065afc45d81c5a19d4a94a00cd5b8f61cefc40c Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 16 ++++++---------- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13 ------------- 2 files changed, 6 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index adb2a04930d6..7869d74c8820 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3462,16 +3462,12 @@ static irqreturn_t i40e_intr(int irq, void *data) struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_q_vector *q_vector = vsi->q_vectors[0]; - /* temporarily disable queue cause for NAPI processing */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)); - - qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_RQCTL(0), qval); - - qval = rd32(hw, I40E_QINT_TQCTL(0)); - qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); - + /* We do not have a way to disarm Queue causes while leaving + * interrupt enabled for all other causes, ideally + * interrupt should be disabled while we are in NAPI but + * this is not a performance path and napi_schedule() + * can deal with rescheduling. + */ if (!test_bit(__I40E_DOWN, &pf->state)) napi_schedule_irqoff(&q_vector->napi); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 25e378c32375..65f2fd80aa79 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2051,19 +2051,6 @@ tx_only: if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { i40e_update_enable_itr(vsi, q_vector); } else { /* Legacy mode */ - struct i40e_hw *hw = &vsi->back->hw; - /* We re-enable the queue 0 cause, but - * don't worry about dynamic_enable - * because we left it on for the other - * possible interrupts during napi - */ - u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) | - I40E_QINT_RQCTL_CAUSE_ENA_MASK; - - wr32(hw, I40E_QINT_RQCTL(0), qval); - qval = rd32(hw, I40E_QINT_TQCTL(0)) | - I40E_QINT_TQCTL_CAUSE_ENA_MASK; - wr32(hw, I40E_QINT_TQCTL(0), qval); i40e_irq_dynamic_enable_icr0(vsi->back, false); } return 0; -- cgit v1.2.3 From 0d79032781caf8573d141259bd5008516439d634 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 15 Jan 2016 14:33:17 -0800 Subject: i40e: expand comment Explain why we cannot remove this code, even though it works differently than any of our other interrupt cause handling code. Change-ID: Ie66203bd037a466066036611c31d44f759ec5176 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 597f2092b597..5dcd19869a41 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2037,7 +2037,11 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf) if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) return 0; - /* re-enable vflr interrupt cause */ + /* Re-enable the VFLR interrupt cause here, before looking for which + * VF got reset. 
Otherwise, if another VF gets a reset while the + * first one is being processed, that interrupt will be lost, and + * that VF will be stuck in reset forever. + */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); -- cgit v1.2.3 From 6e93d0c90fb2e20fc5b9dbdf28b45e91a16b4353 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 15 Jan 2016 14:33:18 -0800 Subject: i40e: better error reporting for nvmupdate Make sure we return EBUSY while finishing up a reset, and add a few bits for better debug messages. Change-ID: I23f6c28a8d96d7aa171abcc265737cec7826c292 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 39 +++++++++++++------------- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 +++- 2 files changed, 24 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 5778f9e2f43c..9fc7546bfc9b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1005,16 +1005,19 @@ static int i40e_get_eeprom(struct net_device *netdev, /* check for NVMUpdate access method */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic && eeprom->magic != magic) { - struct i40e_nvm_access *cmd; - int errno; + struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; + int errno = 0; /* make sure it is the right magic for NVMUpdate */ if ((eeprom->magic >> 16) != hw->device_id) - return -EINVAL; + errno = -EINVAL; + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + errno = -EBUSY; + else + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - cmd = (struct i40e_nvm_access *)eeprom; - ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) + if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, @@ -1098,27 +1101,25 @@ static int i40e_set_eeprom(struct net_device *netdev, struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_hw *hw = &np->vsi->back->hw; struct i40e_pf *pf = np->vsi->back; - struct i40e_nvm_access *cmd; + struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom; int ret_val = 0; - int errno; + int errno = 0; u32 magic; /* normal ethtool set_eeprom is not supported */ magic = hw->vendor_id | (hw->device_id << 16); if (eeprom->magic == magic) - return -EOPNOTSUPP; - + errno = -EOPNOTSUPP; /* check for NVMUpdate access method */ - if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) - return -EINVAL; - - if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || - test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) - return -EBUSY; + else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) + errno = -EINVAL; + else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) + errno = -EBUSY; + else + ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - cmd = (struct i40e_nvm_access *)eeprom; - ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno); - if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM)) + if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) dev_info(&pf->pdev->dev, "NVMUpdate write failed err=%d 
status=0x%x errno=%d module=%d offset=0x%x size=%d\n", ret_val, hw->aq.asq_last_status, errno, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 7869d74c8820..384912b5a9cc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3475,6 +3475,7 @@ static irqreturn_t i40e_intr(int irq, void *data) if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) { ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK; set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); + i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n"); } if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) { @@ -6332,7 +6333,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf) case i40e_aqc_opc_nvm_erase: case i40e_aqc_opc_nvm_update: case i40e_aqc_opc_oem_post_update: - i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); + i40e_debug(&pf->hw, I40E_DEBUG_NVM, + "ARQ NVM operation 0x%04x completed\n", + opcode); break; default: dev_info(&pf->pdev->dev, -- cgit v1.2.3 From 9b9344f7ce19c5c481f203a0af3e120b965afcd9 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Fri, 15 Jan 2016 14:33:19 -0800 Subject: i40evf: set adapter state on reset failure If a reset fails to complete, the driver gets its affairs in order and awaits the cold solace of rmmod. Unfortunately, it was not properly setting the adapter state, which would cause a panic on rmmod, instead of the desired surcease. Set the adapter state to DOWN in this case, and avoid a panic. Change-ID: I6fdd9906da52e023f8dc744f7da44b5d95278ca9 Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 1176326cfb01..cbb7507d40f4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -1883,6 +1883,7 @@ static void i40evf_reset_task(struct work_struct *work) adapter->netdev->flags &= ~IFF_UP; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + adapter->state = __I40EVF_DOWN; dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); return; /* Do not attempt to reinit. It's dead, Jim. */ } -- cgit v1.2.3 From 73b03f9848aa7556c8a10fd6ad4e64254e493445 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Fri, 15 Jan 2016 14:33:20 -0800 Subject: i40e: clean event descriptor before use In one obscure corner case, it was possible to clear the NVM update wait flag when no update_done message was actually received. This patch cleans the event descriptor before use, and moves the opcode check to where it won't get done if there was no event to clean. Also update copyright year in file headers. 
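The ordering at the heart of this fix can be summarized with a short, hypothetical sketch of i40e_clean_arq_element() (the real change follows in the diff below): the caller's event descriptor is zeroed before anything else, and the NVM opcode check only runs once a real event has been copied into it:

    /* pre-clean, so a stale opcode left over from an earlier call can
     * never be mistaken for a fresh NVM update completion
     */
    memset(&e->desc, 0, sizeof(e->desc));

    /* ... take arq_mutex; if the ARQ is empty, unlock and return
     * without ever reaching the opcode check ...
     */

    if (i40e_is_nvm_update_op(&e->desc)) {
        /* release the NVM / clear the update-wait state only here,
         * i.e. only when an update_done event was actually received
         */
    }
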
Change-ID: I68bbc41965e93f4adf07cbe98b9dfd63d41509a4 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 20 +++++++++++--------- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 5 ++++- 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 1fd5ea82a9bc..df8e2fd6a649 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -953,6 +953,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, u16 flags; u16 ntu; + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); @@ -1020,14 +1023,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_clean = ntc; hw->aq.arq.next_to_use = ntu; -clean_arq_element_out: - /* Set pending if needed, unlock and return */ - if (pending != NULL) - *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); - -clean_arq_element_err: - mutex_unlock(&hw->aq.arq_mutex); - if (i40e_is_nvm_update_op(&e->desc)) { if (hw->aq.nvm_release_on_done) { i40e_release_nvm(hw); @@ -1048,6 +1043,13 @@ clean_arq_element_err: } } +clean_arq_element_out: + /* Set pending if needed, unlock and return */ + if (pending) + *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc); +clean_arq_element_err: + mutex_unlock(&hw->aq.arq_mutex); + return ret_code; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c index 3f65e39b3fe4..44f7ed7583dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -887,6 +887,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, u16 flags; u16 ntu; + /* pre-clean the event info */ + memset(&e->desc, 0, sizeof(e->desc)); + /* take the lock before we start messing with the ring */ mutex_lock(&hw->aq.arq_mutex); -- cgit v1.2.3 From 3b1200891b7f41e5725d8659b6f87e1d2060272c Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Fri, 15 Jan 2016 14:33:21 -0800 Subject: i40e: When in promisc mode apply promisc mode to Tx Traffic as well In MFP mode particularly when we were setting the PF VSI in limited promiscuous, the HW switch was still mirroring the outgoing packets from other VSIs (VF/VMdq) onto the PF VSI. With this new bit set, the mirroring doesn't happen any more and so we are in limited promiscuous on the PF VSI in MFP which is similar to defport. 
An API check is not required, since this bit is reserved for FW API version < 1.5 Also update copyright year in file headers. Change-ID: I9840cb95f11dde733d943cb03ce84f68b9611bc8 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 3 ++- drivers/net/ethernet/intel/i40e/i40e_common.c | 9 ++++++++- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 3 ++- 3 files changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 56992997a71c..bb7ecbbdf948 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1087,6 +1087,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 354e36cf2fff..f9239330f49d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1952,12 +1952,19 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_vsi_promiscuous_modes); - if (set) + if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; + if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + flags |= I40E_AQC_SET_VSI_PROMISC_TX; + } cmd->promiscuous_flags = cpu_to_le16(flags); cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST); + if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1)) + cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX); cmd->seid = cpu_to_le16(seid); status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index ed963bc09a82..815e481ccd9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver - * Copyright(c) 2013 - 2014 Intel Corporation. + * Copyright(c) 2013 - 2016 Intel Corporation. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1084,6 +1084,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 +#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 __le16 seid; #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF __le16 vlan_tag; -- cgit v1.2.3 From 8888fd8843ed501a26d9b74de0975b1ace663231 Mon Sep 17 00:00:00 2001 From: Catherine Sullivan Date: Fri, 15 Jan 2016 14:33:22 -0800 Subject: i40e/i40evf: Bump i40e to 1.4.15 and i40evf to 1.4.11. Bump. Change-ID: Ie280dc67e37a1cf667c3469499a4fb90f4177b75 Signed-off-by: Catherine Sullivan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 384912b5a9cc..16e5e0b81bd0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 13 +#define DRV_VERSION_BUILD 15 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index cbb7507d40f4..41369a30dfb8 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 9 +#define DRV_VERSION_BUILD 11 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From 096f6262c573dfd9ede478a2ad76e6b9b994c9a0 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Wed, 17 Feb 2016 22:35:11 -0800 Subject: isdn: divamnt: use y2038-safe ktime_get_ts64() for trace data timestamps divamnt stores a start_time at module init and uses it to calculate elapsed time. The elapsed time, stored in secs and usecs, is part of the trace data the driver maintains for the DIVA Server ISDN cards. No change to the format of that time data is required. To avoid overflow on 32-bit systems, use ktime_get_ts64() to return the elapsed monotonic time since system boot. This is a change from real to monotonic time. Since the driver only stores elapsed time, monotonic time is sufficient and more robust against real time clock changes. These new monotonic values can be more useful for debugging because they can be easily compared to other monotonic timestamps. Note elapsed time values will now start at system boot time rather than module load time, so they will differ slightly from previously reported values. Remove declaration and init of previously unused time constants: start_sec, start_usec. Signed-off-by: Alison Schofield Reviewed-by: Arnd Bergmann Signed-off-by: David S.
Miller --- drivers/isdn/hardware/eicon/debug.c | 4 ---- drivers/isdn/hardware/eicon/divamnt.c | 30 ++++++------------------------ 2 files changed, 6 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/isdn/hardware/eicon/debug.c b/drivers/isdn/hardware/eicon/debug.c index b5226af6ddec..576b7b4a3278 100644 --- a/drivers/isdn/hardware/eicon/debug.c +++ b/drivers/isdn/hardware/eicon/debug.c @@ -192,8 +192,6 @@ static diva_os_spin_lock_t dbg_q_lock; static diva_os_spin_lock_t dbg_adapter_lock; static int dbg_q_busy; static volatile dword dbg_sequence; -static dword start_sec; -static dword start_usec; /* INTERFACE: @@ -215,8 +213,6 @@ int diva_maint_init(byte *base, unsigned long length, int do_init) { dbg_base = base; - diva_os_get_time(&start_sec, &start_usec); - *(dword *)base = (dword)DBG_MAGIC; /* Store Magic */ base += sizeof(dword); length -= sizeof(dword); diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c index 48db08d0bb3d..0de29b7b712f 100644 --- a/drivers/isdn/hardware/eicon/divamnt.c +++ b/drivers/isdn/hardware/eicon/divamnt.c @@ -45,7 +45,6 @@ char *DRIVERRELEASE_MNT = "2.0"; static wait_queue_head_t msgwaitq; static unsigned long opened; -static struct timeval start_time; extern int mntfunc_init(int *, void **, unsigned long); extern void mntfunc_finit(void); @@ -88,28 +87,12 @@ int diva_os_copy_from_user(void *os_handle, void *dst, const void __user *src, */ void diva_os_get_time(dword *sec, dword *usec) { - struct timeval tv; - - do_gettimeofday(&tv); - - if (tv.tv_sec > start_time.tv_sec) { - if (start_time.tv_usec > tv.tv_usec) { - tv.tv_sec--; - tv.tv_usec += 1000000; - } - *sec = (dword) (tv.tv_sec - start_time.tv_sec); - *usec = (dword) (tv.tv_usec - start_time.tv_usec); - } else if (tv.tv_sec == start_time.tv_sec) { - *sec = 0; - if (start_time.tv_usec < tv.tv_usec) { - *usec = (dword) (tv.tv_usec - start_time.tv_usec); - } else { - *usec = 0; - } - } else { - *sec = (dword) tv.tv_sec; - *usec = (dword) tv.tv_usec; - } + struct timespec64 time; + + ktime_get_ts64(&time); + + *sec = (dword) time.tv_sec; + *usec = (dword) (time.tv_nsec / NSEC_PER_USEC); } /* @@ -213,7 +196,6 @@ static int __init maint_init(void) int ret = 0; void *buffer = NULL; - do_gettimeofday(&start_time); init_waitqueue_head(&msgwaitq); printk(KERN_INFO "%s\n", DRIVERNAME); -- cgit v1.2.3 From 930b37ee8d84e1f3008c89e9ce09acbe1a6b5670 Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Thu, 18 Feb 2016 10:59:07 +0100 Subject: net: phy: Add SGMII support for Marvell 88E1510/1512/1514/1518 Add code to select SGMII-to-copper mode upon SGMII interface selection. Signed-off-by: Stefan Roese Cc: Andrew Lunn Cc: Florian Fainelli Cc: David S. Miller Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index e3eb96443c97..1dcbd3ff9e38 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -133,6 +133,11 @@ #define MII_88E3016_DISABLE_SCRAMBLER 0x0200 #define MII_88E3016_AUTO_MDIX_CROSSOVER 0x0030 +#define MII_88E1510_GEN_CTRL_REG_1 0x14 +#define MII_88E1510_GEN_CTRL_REG_1_MODE_MASK 0x7 +#define MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII 0x1 /* SGMII to copper */ +#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */ + MODULE_DESCRIPTION("Marvell PHY driver"); MODULE_AUTHOR("Andy Fleming"); MODULE_LICENSE("GPL"); @@ -438,6 +443,41 @@ static int m88e1318_config_aneg(struct phy_device *phydev) return m88e1121_config_aneg(phydev); } +static int m88e1510_config_init(struct phy_device *phydev) +{ + int err; + int temp; + + /* SGMII-to-Copper mode initialization */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + /* Select page 18 */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18); + if (err < 0) + return err; + + /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */ + temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1); + temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK; + temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII; + err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + if (err < 0) + return err; + + /* PHY reset is necessary after changing MODE[2:0] */ + temp |= MII_88E1510_GEN_CTRL_REG_1_RESET; + err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + if (err < 0) + return err; + + /* Reset page selection */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0); + if (err < 0) + return err; + } + + return 0; +} + static int m88e1510_config_aneg(struct phy_device *phydev) { int err; @@ -1259,6 +1299,7 @@ static struct phy_driver marvell_drivers[] = { .features = PHY_GBIT_FEATURES, .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, + .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, .ack_interrupt = &marvell_ack_interrupt, -- cgit v1.2.3 From a9c9a81f5892eb984234223399ee624f7dbd15e8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:13 -0800 Subject: i40e/i40evf: Drop outer checksum offload that was not requested The i40e and i40evf drivers contained code for inserting an outer checksum on UDP tunnels. The issue however is that the upper levels of the stack never requested such an offload and it results in possible errors. In addition the same logic was being applied to the Rx side where it was attempting to validate the outer checksum, but the logic there was incorrect in that it was testing for the resultant sum to be equal to the header checksum instead of being equal to 0. Since this code is so massively flawed, and doing things that we didn't ask for it to do I am just dropping it, and will bring it back later to use as an offload for SKB_GSO_UDP_TUNNEL_CSUM which can make use of such a feature. As far as the Rx feature I am dropping it completely since it would need to be massively expanded and applied to IPv4 and IPv6 checksums for all parts, not just the one that supports Tx checksum offload for the outer. 
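For reference, a small sketch (standard C, host byte order, illustrative names only) of what a correct outer UDP checksum validation looks like: the ones'-complement sum over pseudo-header, UDP header and payload -- with the received checksum field included -- must fold to zero, rather than a recomputed sum being compared against the checksum field as the dropped Rx code did.

#include <stdint.h>
#include <stddef.h>

static uint16_t csum_fold_sketch(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries back in */
	return (uint16_t)~sum;
}

/* words covers pseudo-header + UDP header (checksum included) + payload */
static int udp_csum_ok_sketch(const uint16_t *words, size_t n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *words++;

	return csum_fold_sketch(sum) == 0;	/* valid iff everything folds to 0 */
}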
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 -- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 47 +++------------------------ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 46 +++----------------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1 - 5 files changed, 10 insertions(+), 87 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 16e5e0b81bd0..0fa52ed1a896 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7474,8 +7474,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->dcb_tc = 0; if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; - if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) - tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 65f2fd80aa79..d4364ec33022 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1391,9 +1391,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); bool ipv4 = false, ipv6 = false; bool ipv4_tunnel, ipv6_tunnel; - __wsum rx_udp_csum; - struct iphdr *iph; - __sum16 csum; ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); @@ -1443,37 +1440,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN/GENEVE traffic has an outer UDPv4 checksum we need to check - * it in the driver, hardware does not do it for us. - * Since L3L4P bit was set we assume a valid IHL value (>=5) - * so the total length of IPv4 header is IHL*4 bytes - * The UDP_0 bit *may* bet set if the *inner* header is UDP + /* The hardware supported by this driver does not validate outer + * checksums for tunneled VXLAN or GENEVE frames. I don't agree + * with it but the specification states that you "MAY validate", it + * doesn't make it a hard requirement so if we have validated the + * inner checksum report CHECKSUM_UNNECESSARY. */ - if (!(vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) && - (ipv4_tunnel)) { - skb->transport_header = skb->mac_header + - sizeof(struct ethhdr) + - (ip_hdr(skb)->ihl * 4); - - /* Add 4 bytes for VLAN tagged packets */ - skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - ? 
VLAN_HLEN : 0; - - if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && - (udp_hdr(skb)->check != 0)) { - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); - - if (udp_hdr(skb)->check != csum) - goto checksum_fail; - - } /* else its GRE and so no outer UDP header */ - } skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; @@ -2453,15 +2425,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags &= ~I40E_TX_FLAGS_IPV4; *tx_flags |= I40E_TX_FLAGS_IPV6; } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && - (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && - (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { - oudph->check = ~csum_tcpudp_magic(oiph->saddr, - oiph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, 0); - *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 3acc9244134d..fb065d4fe15c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -277,7 +277,6 @@ struct i40e_ring { u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) #define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2) /* stats structs */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index fb6cd7e5d3be..8b20ed3f20d1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -863,9 +863,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); bool ipv4 = false, ipv6 = false; bool ipv4_tunnel, ipv6_tunnel; - __wsum rx_udp_csum; - struct iphdr *iph; - __sum16 csum; ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); @@ -915,36 +912,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* If VXLAN traffic has an outer UDPv4 checksum we need to check - * it in the driver, hardware does not do it for us. - * Since L3L4P bit was set we assume a valid IHL value (>=5) - * so the total length of IPv4 header is IHL*4 bytes - * The UDP_0 bit *may* bet set if the *inner* header is UDP + /* The hardware supported by this driver does not validate outer + * checksums for tunneled VXLAN or GENEVE frames. I don't agree + * with it but the specification states that you "MAY validate", it + * doesn't make it a hard requirement so if we have validated the + * inner checksum report CHECKSUM_UNNECESSARY. */ - if (ipv4_tunnel) { - skb->transport_header = skb->mac_header + - sizeof(struct ethhdr) + - (ip_hdr(skb)->ihl * 4); - - /* Add 4 bytes for VLAN tagged packets */ - skb->transport_header += (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - ? 
VLAN_HLEN : 0; - - if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && - (udp_hdr(skb)->check != 0)) { - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - (skb->len - - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); - - if (udp_hdr(skb)->check != csum) - goto checksum_fail; - - } /* else its GRE and so no outer UDP header */ - } skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; @@ -1667,15 +1640,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags |= I40E_TX_FLAGS_IPV6; } - if ((tx_ring->flags & I40E_TXR_FLAGS_OUTER_UDP_CSUM) && - (l4_tunnel == I40E_TXD_CTX_UDP_TUNNELING) && - (*cd_tunneling & I40E_TXD_CTX_QW0_EXT_IP_MASK)) { - oudph->check = ~csum_tcpudp_magic(oiph->saddr, - oiph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, 0); - *cd_tunneling |= I40E_TXD_CTX_QW0_L4T_CS_MASK; - } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 81c96619287b..043b9556834f 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -275,7 +275,6 @@ struct i40e_ring { u16 flags; #define I40E_TXR_FLAGS_WB_ON_ITR BIT(0) -#define I40E_TXR_FLAGS_OUTER_UDP_CSUM BIT(1) /* stats structs */ struct i40e_queue_stats stats; -- cgit v1.2.3 From 03f9d6a59f94f70ae775ca2aae04f2accc01a973 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:20 -0800 Subject: i40e/i40evf: Use u64 values instead of casting them in TSO function Instead of casting u32 values to u64, it makes more sense to just start out with u64 values in the first place. This way we don't need to create a mess with all of the casts needed to populate a 64b value.
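A sketch of the shape of the change (standard C; the shift widths and command bit are illustrative, not the real register layout): making the locals 64-bit up front lets the descriptor quadword be assembled without a cast on every operand.

#include <stdint.h>

static uint64_t build_tso_ctx_sketch(uint32_t pkt_len, uint8_t hdr_len, uint16_t mss)
{
	uint64_t cd_cmd = 0x1;			/* TSO command bit, for illustration */
	uint64_t cd_tso_len = pkt_len - hdr_len;
	uint64_t cd_mss = mss;

	/* no (u64) casts needed: every operand is already 64-bit */
	return (cd_cmd << 4) | (cd_tso_len << 30) | (cd_mss << 50);
}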
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 ++++----- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 9 ++++----- 2 files changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index d4364ec33022..5e825893f203 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2267,7 +2267,7 @@ out: static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { - u32 cd_cmd, cd_tso_len, cd_mss; + u64 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; struct tcphdr *tcph; struct iphdr *iph; @@ -2309,10 +2309,9 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 8b20ed3f20d1..c5f98cba648a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1527,7 +1527,7 @@ out: static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { - u32 cd_cmd, cd_tso_len, cd_mss; + u64 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; struct tcphdr *tcph; struct iphdr *iph; @@ -1569,10 +1569,9 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, cd_cmd = I40E_TX_CTX_DESC_TSO; cd_tso_len = skb->len - *hdr_len; cd_mss = skb_shinfo(skb)->gso_size; - *cd_type_cmd_tso_mss |= ((u64)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | - ((u64)cd_tso_len << - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | - ((u64)cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); + *cd_type_cmd_tso_mss |= (cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) | + (cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) | + (cd_mss << I40E_TXD_CTX_QW1_MSS_SHIFT); return 1; } -- cgit v1.2.3 From c49a7bc3308099a8d5f9e2e38adfc5ab969804aa Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:28 -0800 Subject: i40e/i40evf: Factor out L4 header and checksum from L3 bits in TSO path This patch makes it so that the L4 header offsets and such can be ignored when dealing with the L3 checksum and length update. This is done making use of two things. First we can just use the offset from the L4 header to the start of the packet to determine the L4 offset, and from that we can then make use of the data offset to determine the full length of the headers. As far as adjusting the checksum to remove the length we can simply add the inverse of the length instead of having to recompute the entire pseudo-header without the length. In the case of an IPv6 header this should be significantly cheaper since we can make use of a value we already needed instead of having to read the source and destination address out of the packet. 
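The checksum adjustment relies on a property of ones'-complement arithmetic: subtracting a value from a sum is the same as adding its bitwise inverse. A host-byte-order sketch (standard C, lengths below 64K assumed, illustrative names only) of why the payload length can be dropped from the pseudo-header checksum without re-reading the addresses:

#include <stdint.h>

static uint16_t csum16_add_sketch(uint16_t a, uint16_t b)
{
	uint32_t sum = (uint32_t)a + b;

	return (uint16_t)((sum & 0xffff) + (sum >> 16));	/* end-around carry */
}

/* remove payload_len from a ones'-complement sum that already includes it */
static uint16_t csum16_remove_len_sketch(uint16_t sum_with_len, uint16_t payload_len)
{
	return csum16_add_sketch(sum_with_len, (uint16_t)~payload_len);
}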
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 31 ++++++++++++++++----------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 31 ++++++++++++++++----------- 2 files changed, 36 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5e825893f203..ce0234e67cc5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2269,9 +2269,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, { u64 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; - struct tcphdr *tcph; struct iphdr *iph; - u32 l4len; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -2286,24 +2289,26 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + l4.hdr = skb->encapsulation ? skb_inner_transport_header(skb) : + skb_transport_header(skb); if (iph->version == 4) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - } else if (ipv6h->version == 6) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); + } else { ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - 0, IPPROTO_TCP, 0); } - l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = (skb->encapsulation - ? (skb_inner_transport_header(skb) - skb->data) - : skb_transport_offset(skb)) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = (__force u16)l4.tcp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.tcp->check = ~csum_fold((__force __wsum)paylen); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index c5f98cba648a..881d0ada99fc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1529,9 +1529,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, { u64 cd_cmd, cd_tso_len, cd_mss; struct ipv6hdr *ipv6h; - struct tcphdr *tcph; struct iphdr *iph; - u32 l4len; + union { + struct tcphdr *tcp; + unsigned char *hdr; + } l4; + u32 paylen, l4_offset; int err; if (skb->ip_summed != CHECKSUM_PARTIAL) @@ -1546,24 +1549,26 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + l4.hdr = skb->encapsulation ? skb_inner_transport_header(skb) : + skb_transport_header(skb); if (iph->version == 4) { - tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; - tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, - 0, IPPROTO_TCP, 0); - } else if (ipv6h->version == 6) { - tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); + } else { ipv6h->payload_len = 0; - tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, - 0, IPPROTO_TCP, 0); } - l4len = skb->encapsulation ? inner_tcp_hdrlen(skb) : tcp_hdrlen(skb); - *hdr_len = (skb->encapsulation - ? (skb_inner_transport_header(skb) - skb->data) - : skb_transport_offset(skb)) + l4len; + /* determine offset of inner transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from inner checksum */ + paylen = (__force u16)l4.tcp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.tcp->check = ~csum_fold((__force __wsum)paylen); + + /* compute length of segmentation header */ + *hdr_len = (l4.tcp->doff * 4) + l4_offset; /* find the field values */ cd_cmd = I40E_TX_CTX_DESC_TSO; -- cgit v1.2.3 From c777019af1dc7343be8dc44bb4d32f5e2ef072dd Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:35 -0800 Subject: i40e/i40evf: Consolidate all header changes into TSO function This patch goes through and pulls all of the spots where we were updating either the TCP or IP checksums in the TSO and checksum path into the TSO function. The general idea here is that we should only be updating the header after we verify we have completed a skb_cow_head check to verify the head is writable. One other advantage to doing this is that it makes things much more obvious. For example, in the case of IPv6 there was one spot where the offset of the IPv4 header checksum was being updated which is obviously incorrect. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 44 ++++++++++++++++----------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 44 ++++++++++++++++----------- 2 files changed, 52 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index ce0234e67cc5..f47f9cbfefcc 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2268,8 +2268,11 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; - struct ipv6hdr *ipv6h; - struct iphdr *iph; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; union { struct tcphdr *tcp; unsigned char *hdr; @@ -2287,16 +2290,29 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); - l4.hdr = skb->encapsulation ? 
skb_inner_transport_header(skb) : - skb_transport_header(skb); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); - if (iph->version == 4) { - iph->tot_len = 0; - iph->check = 0; + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; } else { - ipv6h->payload_len = 0; + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE)) { + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } } /* determine offset of inner transport header */ @@ -2381,15 +2397,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; - struct udphdr *oudph = NULL; - struct iphdr *oiph = NULL; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: - oudph = udp_hdr(skb); - oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; @@ -2407,15 +2419,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, if (*tx_flags & I40E_TX_FLAGS_IPV4) { if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (*tx_flags & I40E_TX_FLAGS_TSO) - ip_hdr(skb)->check = 0; } /* Now set the ctx descriptor fields */ @@ -2444,7 +2453,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, */ if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - this_ip_hdr->check = 0; } else { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; } diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 881d0ada99fc..b5b25277ddac 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1528,8 +1528,11 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss) { u64 cd_cmd, cd_tso_len, cd_mss; - struct ipv6hdr *ipv6h; - struct iphdr *iph; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; union { struct tcphdr *tcp; unsigned char *hdr; @@ -1547,16 +1550,29 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); - l4.hdr = skb->encapsulation ? 
skb_inner_transport_header(skb) : - skb_transport_header(skb); + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); - if (iph->version == 4) { - iph->tot_len = 0; - iph->check = 0; + /* initialize outer IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; } else { - ipv6h->payload_len = 0; + ip.v6->payload_len = 0; + } + + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE)) { + /* reset pointers to inner headers */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); + + /* initialize inner IP header fields */ + if (ip.v4->version == 4) { + ip.v4->tot_len = 0; + ip.v4->check = 0; + } else { + ip.v6->payload_len = 0; + } } /* determine offset of inner transport header */ @@ -1598,15 +1614,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; u32 l4_tunnel = 0; if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { case IPPROTO_UDP: - oudph = udp_hdr(skb); - oiph = ip_hdr(skb); l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; @@ -1621,15 +1633,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, if (*tx_flags & I40E_TX_FLAGS_IPV4) { if (*tx_flags & I40E_TX_FLAGS_TSO) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - ip_hdr(skb)->check = 0; } else { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - if (*tx_flags & I40E_TX_FLAGS_TSO) - ip_hdr(skb)->check = 0; } /* Now set the ctx descriptor fields */ @@ -1659,7 +1668,6 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, */ if (*tx_flags & I40E_TX_FLAGS_TSO) { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - this_ip_hdr->check = 0; } else { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; } -- cgit v1.2.3 From b96b78f2b789ab8398e7ec0111bb8b4588ed42bf Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:42 -0800 Subject: i40e/i40evf: Replace header pointers with unions of pointers in Tx checksum path The Tx checksum path was maintaining a set of 3 pointers and two lengths in order to prepare the packet for being checksummed. The thing is we only really needed 2 pointers, and the lengths that were being maintained can easily be computed. As such we can replace the IPv4 and IPv6 header pointers with one single union that represents both, or a generic pointer to the start of the network header. For the L4 headers we can do the same with TCP and a generic pointer to the start of the transport header. The length of the TCP header is obtained by simply multiplying doff by 4, and the network header length can be obtained by subtracting the network header pointer from the transport header pointer. While I was at it I renamed l4_hdr to l4_proto to make it a bit more clear and less likely to be confused with l4.hdr which is the transport header pointer. 
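A compact sketch of the pointer-union pattern described above (standard C, stand-in struct layouts rather than the real kernel headers): one byte pointer aliases the protocol-specific views, the L3 header length falls out of pointer arithmetic, and the TCP header length comes from doff * 4.

#include <stdint.h>
#include <stddef.h>

struct ip4_sketch { uint8_t ver_ihl; uint8_t rest[19]; };
struct tcp_sketch { uint8_t src[2], dst[2], seq[4], ack[4]; uint8_t doff_rsvd; };

union l3_ptr_sketch {
	struct ip4_sketch *v4;
	/* struct ipv6hdr *v6 would sit here in the real driver */
	uint8_t *hdr;
};

union l4_ptr_sketch {
	struct tcp_sketch *tcp;
	uint8_t *hdr;
};

static size_t l3_len_sketch(union l3_ptr_sketch ip, union l4_ptr_sketch l4)
{
	return (size_t)(l4.hdr - ip.hdr);		/* network -> transport distance */
}

static size_t tcp_len_sketch(union l4_ptr_sketch l4)
{
	return (size_t)(l4.tcp->doff_rsvd >> 4) * 4;	/* doff counts 32-bit words */
}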
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 51 +++++++++++++------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 52 +++++++++++++-------------- 2 files changed, 52 insertions(+), 51 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f47f9cbfefcc..5cc7e711068d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2392,12 +2392,21 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct i40e_ring *tx_ring, u32 *cd_tunneling) { - struct ipv6hdr *this_ipv6_hdr; - unsigned int this_tcp_hdrlen; - struct iphdr *this_ip_hdr; - u32 network_hdr_len; - u8 l4_hdr = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; u32 l4_tunnel = 0; + u8 l4_proto = 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { @@ -2411,10 +2420,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, default: return; } - network_hdr_len = skb_inner_network_header_len(skb); - this_ip_hdr = inner_ip_hdr(skb); - this_ipv6_hdr = inner_ipv6_hdr(skb); - this_tcp_hdrlen = inner_tcp_hdrlen(skb); + + /* switch L4 header pointer from outer to inner */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); if (*tx_flags & I40E_TX_FLAGS_IPV4) { if (*tx_flags & I40E_TX_FLAGS_TSO) { @@ -2434,20 +2443,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (this_ip_hdr->version == 6) { + if (ip.v6->version == 6) { *tx_flags &= ~I40E_TX_FLAGS_IPV4; *tx_flags |= I40E_TX_FLAGS_IPV6; } - } else { - network_hdr_len = skb_network_header_len(skb); - this_ip_hdr = ip_hdr(skb); - this_ipv6_hdr = ipv6_hdr(skb); - this_tcp_hdrlen = tcp_hdrlen(skb); } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_hdr = this_ip_hdr->protocol; + l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ @@ -2456,26 +2460,23 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } else { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; } - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_hdr = this_ipv6_hdr->nexthdr; + l4_proto = ip.v6->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; } + + /* Now set the td_offset for IP header length */ + *td_offset = ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ *td_offset |= (skb_network_offset(skb) >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; /* Enable L4 checksum offloads */ - switch (l4_hdr) { + switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (this_tcp_hdrlen >> 2) << + *td_offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index b5b25277ddac..29af3c9fc120 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1609,12 +1609,21 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct i40e_ring *tx_ring, u32 *cd_tunneling) { - struct ipv6hdr *this_ipv6_hdr; - unsigned int this_tcp_hdrlen; - struct iphdr *this_ip_hdr; - u32 network_hdr_len; - u8 l4_hdr = 0; + union { + struct iphdr *v4; + struct ipv6hdr *v6; + unsigned char *hdr; + } ip; + union { + struct tcphdr *tcp; + struct udphdr *udp; + unsigned char *hdr; + } l4; u32 l4_tunnel = 0; + u8 l4_proto = 0; + + ip.hdr = skb_network_header(skb); + l4.hdr = skb_transport_header(skb); if (skb->encapsulation) { switch (ip_hdr(skb)->protocol) { @@ -1625,10 +1634,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, default: return; } - network_hdr_len = skb_inner_network_header_len(skb); - this_ip_hdr = inner_ip_hdr(skb); - this_ipv6_hdr = inner_ipv6_hdr(skb); - this_tcp_hdrlen = inner_tcp_hdrlen(skb); + + /* switch L4 header pointer from outer to inner */ + ip.hdr = skb_inner_network_header(skb); + l4.hdr = skb_inner_transport_header(skb); if (*tx_flags & I40E_TX_FLAGS_IPV4) { if (*tx_flags & I40E_TX_FLAGS_TSO) { @@ -1648,21 +1657,15 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (this_ip_hdr->version == 6) { + if (ip.v6->version == 6) { *tx_flags &= ~I40E_TX_FLAGS_IPV4; *tx_flags |= I40E_TX_FLAGS_IPV6; } - - } else { - network_hdr_len = skb_network_header_len(skb); - this_ip_hdr = ip_hdr(skb); - this_ipv6_hdr = ipv6_hdr(skb); - this_tcp_hdrlen = tcp_hdrlen(skb); } /* Enable IP checksum offloads */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - l4_hdr = this_ip_hdr->protocol; + l4_proto = ip.v4->protocol; /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. 
*/ @@ -1671,26 +1674,23 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, } else { *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; } - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_hdr = this_ipv6_hdr->nexthdr; + l4_proto = ip.v6->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; - /* Now set the td_offset for IP header length */ - *td_offset = (network_hdr_len >> 2) << - I40E_TX_DESC_LENGTH_IPLEN_SHIFT; } + + /* Now set the td_offset for IP header length */ + *td_offset = ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ *td_offset |= (skb_network_offset(skb) >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; /* Enable L4 checksum offloads */ - switch (l4_hdr) { + switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= (this_tcp_hdrlen >> 2) << + *td_offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: -- cgit v1.2.3 From a0064728f8a34f7a5afd9df86d9cdd8210977c8d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:48 -0800 Subject: i40e/i40evf: Add support for IPv4 encapsulated in IPv6 This patch fixes two issues. First was the fact that iphdr(skb)->protocl was being used to test for the outer transport protocol. This completely breaks IPv6 support. Second was the fact that we cleared the flag for v4 going to v6, but we didn't take care of txflags going the other way. As such we would have the v6 flag still set even if the inner header was v4. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 38 +++++++++++++++---------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 41 +++++++++++++++++---------- 2 files changed, 49 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5cc7e711068d..1404cae04b83 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2409,13 +2409,28 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4.hdr = skb_transport_header(skb); if (skb->encapsulation) { - switch (ip_hdr(skb)->protocol) { + /* define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + else + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + l4_proto = ip.v6->nexthdr; + } + + /* define outer transport */ + switch (l4_proto) { case IPPROTO_UDP: l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; default: return; @@ -2424,17 +2439,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* switch L4 header pointer from outer to inner */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); - - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } - } else if (*tx_flags & 
I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - } + l4_proto = 0; /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << @@ -2443,10 +2448,13 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (ip.v6->version == 6) { - *tx_flags &= ~I40E_TX_FLAGS_IPV4; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) *tx_flags |= I40E_TX_FLAGS_IPV6; - } } /* Enable IP checksum offloads */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 29af3c9fc120..39d5f807f08c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1626,11 +1626,29 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4.hdr = skb_transport_header(skb); if (skb->encapsulation) { - switch (ip_hdr(skb)->protocol) { + /* define outer network header type */ + if (*tx_flags & I40E_TX_FLAGS_IPV4) { + if (*tx_flags & I40E_TX_FLAGS_TSO) + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; + else + *cd_tunneling |= + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; + } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + l4_proto = ip.v6->nexthdr; + } + + /* define outer transport */ + switch (l4_proto) { case IPPROTO_UDP: l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; + case IPPROTO_GRE: + l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; + break; default: return; } @@ -1638,17 +1656,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* switch L4 header pointer from outer to inner */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); - - if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } - } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; - } + l4_proto = 0; /* Now set the ctx descriptor fields */ *cd_tunneling |= (skb_network_header_len(skb) >> 2) << @@ -1657,10 +1665,13 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; - if (ip.v6->version == 6) { - *tx_flags &= ~I40E_TX_FLAGS_IPV4; + + /* reset type as we transition from outer to inner headers */ + *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); + if (ip.v4->version == 4) + *tx_flags |= I40E_TX_FLAGS_IPV4; + if (ip.v6->version == 6) *tx_flags |= I40E_TX_FLAGS_IPV6; - } } /* Enable IP checksum offloads */ -- cgit v1.2.3 From a3fd9d8876a589f05725237aced606b995956860 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:16:54 -0800 Subject: i40e/i40evf: Handle IPv6 extension headers in checksum offload This patch adds support for IPv6 extension headers in setting up the Tx checksum. Without this patch extension headers would cause IPv6 traffic to fail as the transport protocol could not be identified. 
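A simplified sketch of the walk (standard C; fragment/AH headers and bounds handling are ignored for brevity, and the real driver relies on ipv6_skip_exthdr()): the next-header value only names the L4 protocol once all extension headers have been stepped over.

#include <stdint.h>
#include <stddef.h>

static int is_ext_hdr_sketch(uint8_t nh)
{
	return nh == 0 || nh == 43 || nh == 60;	/* hop-by-hop, routing, dest opts */
}

/* exthdrs points at the first extension header; returns the L4 protocol */
static uint8_t skip_exthdrs_sketch(const uint8_t *exthdrs, size_t len, uint8_t nexthdr)
{
	size_t off = 0;

	while (is_ext_hdr_sketch(nexthdr) && off + 8 <= len) {
		uint8_t hdrlen = exthdrs[off + 1];	/* 8-byte units, first 8 not counted */

		nexthdr = exthdrs[off];			/* next-header field comes first */
		off += ((size_t)hdrlen + 1) * 8;
	}
	return nexthdr;
}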
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 14 +++++++++++++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 14 +++++++++++++- 2 files changed, 26 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1404cae04b83..e49fe8f580b1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2402,7 +2402,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct udphdr *udp; unsigned char *hdr; } l4; + unsigned char *exthdr; u32 l4_tunnel = 0; + __be16 frag_off; u8 l4_proto = 0; ip.hdr = skb_network_header(skb); @@ -2419,7 +2421,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } /* define outer transport */ @@ -2469,8 +2476,13 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; } } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_proto = ip.v6->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } /* Now set the td_offset for IP header length */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 39d5f807f08c..48ec7631b3dd 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1619,7 +1619,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct udphdr *udp; unsigned char *hdr; } l4; + unsigned char *exthdr; u32 l4_tunnel = 0; + __be16 frag_off; u8 l4_proto = 0; ip.hdr = skb_network_header(skb); @@ -1636,7 +1638,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } /* define outer transport */ @@ -1686,8 +1693,13 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; } } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - l4_proto = ip.v6->nexthdr; *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + + exthdr = ip.hdr + sizeof(*ip.v6); + l4_proto = ip.v6->nexthdr; + if (l4.hdr != exthdr) + ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); } /* Now set the td_offset for IP header length */ -- cgit v1.2.3 From 07dabf20d9867710b90b91108b2adcd448773e25 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 18 Feb 2016 19:19:29 +0100 Subject: vxlan: tun_id is 64bit, not 32bit The tun_id field in struct ip_tunnel_key is __be64, not __be32. We need to convert the vni to tun_id correctly. Fixes: 54bfd872bf16 ("vxlan: keep flags and vni in network byte order") Reported-by: Paolo Abeni Tested-by: Paolo Abeni Signed-off-by: Jiri Benc Acked-by: Thadeu Lima de Souza Cascardo Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 5 +++-- include/net/vxlan.h | 9 +++++++++ 2 files changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 3a84680b5117..75bccb360599 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1310,9 +1310,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; if (vxlan_collect_metadata(vs)) { + __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); + tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, - vxlan_vni(vxlan_hdr(skb)->vx_vni), - sizeof(*md)); + vxlan_vni_to_tun_id(vni), sizeof(*md)); if (!tun_dst) goto drop; diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 1b85a3b40c5a..748083de367a 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -294,6 +294,15 @@ static inline __be32 vxlan_tun_id_to_vni(__be64 tun_id) #endif } +static inline __be64 vxlan_vni_to_tun_id(__be32 vni) +{ +#if defined(__BIG_ENDIAN) + return (__be64)vni; +#else + return (__be64)vni << 32; +#endif +} + static inline size_t vxlan_rco_start(__be32 vni_field) { return be32_to_cpu(vni_field & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; -- cgit v1.2.3 From 475b4205aa52c16feef08d55c8fd76e815b6bee7 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:01 -0800 Subject: i40e/i40evf: Do not write to descriptor unless we complete This patch defers writing to the Tx descriptor bits until we know we have successfully completed a given operation. So for example we defer updating the tunnelling portion of the context descriptor until we have fully identified the type. The advantage to this approach is that we can assemble values as we go instead of having to try and kludge everything together all at once. As a result we can significantly clean up the tunneling configuration for instance as we can just do a pointer walk and do the math for the distance between each set of points. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 80 ++++++++++++++------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 80 ++++++++++++++------------- 2 files changed, 84 insertions(+), 76 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index e49fe8f580b1..5b591b865fd5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2403,24 +2403,26 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 l4_tunnel = 0; + u32 offset, cmd = 0, tunnel = 0; __be16 frag_off; u8 l4_proto = 0; ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + if (skb->encapsulation) { /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - else - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+ I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -2429,33 +2431,38 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: - l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; case IPPROTO_GRE: - l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; default: return; } + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + /* switch L4 header pointer from outer to inner */ - ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); l4_proto = 0; - /* Now set the ctx descriptor fields */ - *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - l4_tunnel | - ((skb_inner_network_offset(skb) - - skb_transport_offset(skb)) >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - /* reset type as we transition from outer to inner headers */ *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); if (ip.v4->version == 4) @@ -2470,13 +2477,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - } else { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; - } + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : + I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -2485,35 +2490,34 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* Now set the td_offset for IP header length */ - *td_offset = ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ - *td_offset |= (skb_network_offset(skb) >> 1) << - I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + /* compute inner L3 header size */ + offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= l4.tcp->doff << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: break; } + + *td_cmd |= cmd; + *td_offset |= offset; } /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 48ec7631b3dd..0ee13f6619c4 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1620,24 +1620,26 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, unsigned char *hdr; } l4; unsigned char *exthdr; - u32 l4_tunnel = 0; + u32 offset, cmd = 0, tunnel = 0; __be16 frag_off; u8 l4_proto = 0; ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); + /* compute outer L2 header size */ + offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + if (skb->encapsulation) { /* define outer network header type */ if (*tx_flags & I40E_TX_FLAGS_IPV4) { - if (*tx_flags & I40E_TX_FLAGS_TSO) - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4; - else - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+ I40E_TX_CTX_EXT_IP_IPV4 : + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; + l4_proto = ip.v4->protocol; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + tunnel |= I40E_TX_CTX_EXT_IP_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -1646,33 +1648,38 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } + /* compute outer L3 header size */ + tunnel |= ((l4.hdr - ip.hdr) / 4) << + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT; + + /* switch IP header pointer from outer to inner header */ + ip.hdr = skb_inner_network_header(skb); + /* define outer transport */ switch (l4_proto) { case IPPROTO_UDP: - l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING; + tunnel |= I40E_TXD_CTX_UDP_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; case IPPROTO_GRE: - l4_tunnel = I40E_TXD_CTX_GRE_TUNNELING; + tunnel |= I40E_TXD_CTX_GRE_TUNNELING; *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: return; } + /* compute tunnel header size */ + tunnel |= ((ip.hdr - l4.hdr) / 2) << + I40E_TXD_CTX_QW0_NATLEN_SHIFT; + + /* record tunnel offload values */ + *cd_tunneling |= tunnel; + /* switch L4 header pointer from outer to inner */ - ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); l4_proto = 0; - /* Now set the ctx descriptor fields */ - *cd_tunneling |= (skb_network_header_len(skb) >> 2) << - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT | - l4_tunnel | - ((skb_inner_network_offset(skb) - - skb_transport_offset(skb)) >> 1) << - I40E_TXD_CTX_QW0_NATLEN_SHIFT; - /* reset type as we transition from outer to inner headers */ *tx_flags &= ~(I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6); if (ip.v4->version == 4) @@ -1687,13 +1694,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, /* the stack computes the IP header already, the only time we * need the hardware to recompute it is in the case of TSO. */ - if (*tx_flags & I40E_TX_FLAGS_TSO) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM; - } else { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4; - } + cmd |= (*tx_flags & I40E_TX_FLAGS_TSO) ? 
+ I40E_TX_DESC_CMD_IIPT_IPV4_CSUM : + I40E_TX_DESC_CMD_IIPT_IPV4; } else if (*tx_flags & I40E_TX_FLAGS_IPV6) { - *td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; + cmd |= I40E_TX_DESC_CMD_IIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; @@ -1702,35 +1707,34 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, &l4_proto, &frag_off); } - /* Now set the td_offset for IP header length */ - *td_offset = ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; - /* words in MACLEN + dwords in IPLEN + dwords in L4Len */ - *td_offset |= (skb_network_offset(skb) >> 1) << - I40E_TX_DESC_LENGTH_MACLEN_SHIFT; + /* compute inner L3 header size */ + offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; /* Enable L4 checksum offloads */ switch (l4_proto) { case IPPROTO_TCP: /* enable checksum offloads */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; - *td_offset |= l4.tcp->doff << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP; + offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_SCTP: /* enable SCTP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; - *td_offset |= (sizeof(struct sctphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_SCTP; + offset |= (sizeof(struct sctphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; case IPPROTO_UDP: /* enable UDP checksum offload */ - *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; - *td_offset |= (sizeof(struct udphdr) >> 2) << - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; + cmd |= I40E_TX_DESC_CMD_L4T_EOFT_UDP; + offset |= (sizeof(struct udphdr) >> 2) << + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: break; } + + *td_cmd |= cmd; + *td_offset |= offset; } /** -- cgit v1.2.3 From 529f1f652e3c3c6db6ab5a6e3a35469ddfd9575d Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:10 -0800 Subject: i40e/i40evf: Add exception handling for Tx checksum Add exception handling to the Tx checksum path so that we can handle cases of TSO where the frame is bad, or Tx checksum where we didn't recognize a protocol Drop I40E_TX_FLAGS_CSUM as it is unused, move the CHECKSUM_PARTIAL check into the function itself so that we can decrease indent. 
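The new contract is easiest to read as a tri-state return: a negative value tells the caller to drop the frame (TSO was requested for a protocol the hardware cannot checksum), zero means no hardware offload was programmed (either checksumming was not requested, or the stack fell back via skb_checksum_help()), and a positive value means the descriptor command/offset fields were filled in. The sketch below is a minimal, stand-alone illustration of that calling pattern under stated assumptions; the function name, the hw_can_offload flag and the 0x1 command bit are invented placeholders and not the driver's actual code.

#include <stdbool.h>
#include <stdio.h>

/* Tri-state result mirroring the reworked i40e_tx_enable_csum() contract:
 * negative = caller must drop the frame, 0 = no HW offload programmed,
 * positive = descriptor offload fields were filled in.
 */
static int example_enable_csum(bool needs_csum, bool is_tso,
			       bool hw_can_offload, unsigned int *td_cmd)
{
	if (!needs_csum)		/* skb->ip_summed != CHECKSUM_PARTIAL */
		return 0;

	if (!hw_can_offload) {
		if (is_tso)		/* cannot segment what we cannot checksum */
			return -1;
		/* the real driver falls back via skb_checksum_help() here */
		return 0;
	}

	*td_cmd |= 0x1;			/* stand-in for the real I40E_TX_DESC_CMD_* bits */
	return 1;
}

int main(void)
{
	unsigned int td_cmd = 0;
	/* TSO frame carrying a protocol the hardware cannot offload */
	int ret = example_enable_csum(true, true, false, &td_cmd);

	if (ret < 0)
		printf("drop frame (unsupported offload under TSO)\n");
	return 0;
}

This is the same pattern that lets the transmit routine reuse its existing "tso < 0" drop path for checksum failures, as the i40e_xmit_frame_ring() hunks in the diff below show.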
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 34 +++++++++++++++++--------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 - drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 35 ++++++++++++++++++--------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 1 - 4 files changed, 45 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5b591b865fd5..6b08b0fa6dcf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2387,10 +2387,10 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_ring: Tx descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) { union { struct iphdr *v4; @@ -2407,6 +2407,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, __be16 frag_off; u8 l4_proto = 0; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); @@ -2449,7 +2452,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL; break; default: - return; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + + skb_checksum_help(skb); + return 0; } /* compute tunnel header size */ @@ -2513,11 +2520,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - break; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; } *td_cmd |= cmd; *td_offset |= offset; + + return 1; } /** @@ -2954,12 +2966,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, td_cmd |= I40E_TX_DESC_CMD_ICRC; /* Always offload the checksum, since it's in the data descriptor */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - tx_flags |= I40E_TX_FLAGS_CSUM; - - i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - } + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index fb065d4fe15c..fde5f42524fb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -153,7 +153,6 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM BIT(0) #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 0ee13f6619c4..143c5703f54e 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1602,12 +1602,13 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, * @tx_flags: pointer to Tx flags currently set * @td_cmd: Tx descriptor command bits to set * @td_offset: Tx descriptor header offsets to set + * @tx_ring: Tx 
descriptor ring * @cd_tunneling: ptr to context desc bits **/ -static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, - u32 *td_cmd, u32 *td_offset, - struct i40e_ring *tx_ring, - u32 *cd_tunneling) +static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, + u32 *td_cmd, u32 *td_offset, + struct i40e_ring *tx_ring, + u32 *cd_tunneling) { union { struct iphdr *v4; @@ -1624,6 +1625,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, __be16 frag_off; u8 l4_proto = 0; + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + ip.hdr = skb_network_header(skb); l4.hdr = skb_transport_header(skb); @@ -1666,7 +1670,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL; break; default: - return; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + + skb_checksum_help(skb); + return 0; } /* compute tunnel header size */ @@ -1730,11 +1738,16 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; break; default: - break; + if (*tx_flags & I40E_TX_FLAGS_TSO) + return -1; + skb_checksum_help(skb); + return 0; } *td_cmd |= cmd; *td_offset |= offset; + + return 1; } /** @@ -2150,12 +2163,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, td_cmd |= I40E_TX_DESC_CMD_ICRC; /* Always offload the checksum, since it's in the data descriptor */ - if (skb->ip_summed == CHECKSUM_PARTIAL) { - tx_flags |= I40E_TX_FLAGS_CSUM; - - i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - } + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 043b9556834f..6ea8701cf066 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -153,7 +153,6 @@ enum i40e_dyn_idx_t { #define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define I40E_MIN_DESC_PENDING 4 -#define I40E_TX_FLAGS_CSUM BIT(0) #define I40E_TX_FLAGS_HW_VLAN BIT(1) #define I40E_TX_FLAGS_SW_VLAN BIT(2) #define I40E_TX_FLAGS_TSO BIT(3) -- cgit v1.2.3 From fc08a01a6925a1a0d69bb9026f266606a6a96a20 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 16 Feb 2016 10:07:09 +0530 Subject: cxgb4: Use __dev_uc_sync/__dev_mc_sync to sync MAC address Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 27 +++++ drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 138 ++++++++++++++---------- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 92 +++++++++++++--- 3 files changed, 184 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index ec6e849676c1..1dac6c6111bf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -702,6 +702,11 @@ struct doorbell_stats { u32 db_full; }; +struct hash_mac_addr { + struct list_head list; + u8 addr[ETH_ALEN]; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -740,6 +745,7 @@ struct adapter { void *uld_handle[CXGB4_ULD_MAX]; struct list_head list_node; struct list_head rcu_node; + struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ struct tid_info tids; void **tid_release_head; @@ -1207,6 +1213,24 @@ static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); } +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by HW inexact + * (hash) address matching. + */ +static inline int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1389,6 +1413,9 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, bool free, unsigned int naddr, const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); +int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, unsigned int naddr, + const u8 **addr, bool sleep_ok); int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, int idx, const u8 *addr, bool persist, bool add_smt); int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index b8a5fb0c32d4..adad73f7c8cd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -338,84 +338,108 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id) netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); } +int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ +module_param(dbfifo_int_thresh, int, 0644); +MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); + /* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. 
+ * usecs to sleep while draining the dbfifo */ -static int set_addr_filters(const struct net_device *dev, bool sleep) +static int dbfifo_drain_delay = 1000; +module_param(dbfifo_drain_delay, int, 0644); +MODULE_PARM_DESC(dbfifo_drain_delay, + "usecs to sleep while draining the dbfifo"); + +static inline int cxgb4_set_addr_hash(struct port_info *pi) { + struct adapter *adap = pi->adapter; + u64 vec = 0; + bool ucast = false; + struct hash_mac_addr *entry; + + /* Calculate the hash vector for the updated list and program it */ + list_for_each_entry(entry, &adap->mac_hlist, list) { + ucast |= is_unicast_ether_addr(entry->addr); + vec |= (1ULL << hash_mac_addr(entry->addr)); + } + return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast, + vec, false); +} + +static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + int ret; u64 mhash = 0; u64 uhash = 0; - bool free = true; - u16 filt_idx[7]; - const u8 *addr[7]; - int ret, naddr = 0; - const struct netdev_hw_addr *ha; - int uc_cnt = netdev_uc_count(dev); - int mc_cnt = netdev_mc_count(dev); - const struct port_info *pi = netdev_priv(dev); - unsigned int mb = pi->adapter->pf; + bool free = false; + bool ucast = is_unicast_ether_addr(mac_addr); + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *new_entry; - /* first do the secondary unicast addresses */ - netdev_for_each_uc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, - naddr, addr, filt_idx, &uhash, sleep); - if (ret < 0) - return ret; - - free = false; - naddr = 0; - } + ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist, + NULL, ucast ? &uhash : &mhash, false); + if (ret < 0) + goto out; + /* if hash != 0, then add the addr to hash addr list + * so on the end we will calculate the hash for the + * list and program it + */ + if (uhash || mhash) { + new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); + if (!new_entry) + return -ENOMEM; + ether_addr_copy(new_entry->addr, mac_addr); + list_add_tail(&new_entry->list, &adap->mac_hlist); + ret = cxgb4_set_addr_hash(pi); } +out: + return ret < 0 ? ret : 0; +} - /* next set up the multicast addresses */ - netdev_for_each_mc_addr(ha, dev) { - addr[naddr++] = ha->addr; - if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { - ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, - naddr, addr, filt_idx, &mhash, sleep); - if (ret < 0) - return ret; +static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adap = pi->adapter; + int ret; + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *entry, *tmp; - free = false; - naddr = 0; + /* If the MAC address to be removed is in the hash addr + * list, delete it from the list and update hash vector + */ + list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) { + if (ether_addr_equal(entry->addr, mac_addr)) { + list_del(&entry->list); + kfree(entry); + return cxgb4_set_addr_hash(pi); } } - return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0, - uhash | mhash, sleep); + ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false); + return ret < 0 ? 
-EINVAL : 0; } -int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */ -module_param(dbfifo_int_thresh, int, 0644); -MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); - -/* - * usecs to sleep while draining the dbfifo - */ -static int dbfifo_drain_delay = 1000; -module_param(dbfifo_drain_delay, int, 0644); -MODULE_PARM_DESC(dbfifo_drain_delay, - "usecs to sleep while draining the dbfifo"); - /* * Set Rx properties of a port, such as promiscruity, address filters, and MTU. * If @mtu is -1 it is left unchanged. */ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { - int ret; struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; - ret = set_addr_filters(dev, sleep_ok); - if (ret == 0) - ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu, - (dev->flags & IFF_PROMISC) ? 1 : 0, - (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, - sleep_ok); - return ret; + if (!(dev->flags & IFF_PROMISC)) { + __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); + if (!(dev->flags & IFF_ALLMULTI)) + __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync); + } + + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu, + (dev->flags & IFF_PROMISC) ? 1 : 0, + (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, + sleep_ok); } /** @@ -2677,6 +2701,8 @@ static int cxgb_up(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) update_clip(adap); #endif + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adap->mac_hlist); out: return err; irq_err: diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 636b4691f252..cc1736bece0f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -4432,23 +4432,6 @@ void t4_intr_disable(struct adapter *adapter) t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0); } -/** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by HW inexact - * (hash) address matching. - */ -static int hash_mac_addr(const u8 *addr) -{ - u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; - u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; - a ^= b; - a ^= (a >> 12); - a ^= (a >> 6); - return a & 0x3f; -} - /** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter @@ -6737,6 +6720,81 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, return ret; } +/** + * t4_free_mac_filt - frees exact-match filters of given MAC addresses + * @adap: the adapter + * @mbox: mailbox to use for the FW command + * @viid: the VI id + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @sleep_ok: call is allowed to sleep + * + * Frees the exact-match filter for each of the supplied addresses + * + * Returns a negative error number or the number of filters freed. + */ +int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, + unsigned int viid, unsigned int naddr, + const u8 **addr, bool sleep_ok) +{ + int offset, ret = 0; + struct fw_vi_mac_cmd c; + unsigned int nfilters = 0; + unsigned int max_naddr = is_t4(adap->params.chip) ? + NUM_MPS_CLS_SRAM_L_INSTANCES : + NUM_MPS_T5_CLS_SRAM_L_INSTANCES; + unsigned int rem = naddr; + + if (naddr > max_naddr) + return -EINVAL; + + for (offset = 0; offset < (int)naddr ; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) + ? 
rem + : ARRAY_SIZE(c.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&c, 0, sizeof(c)); + c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + c.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); + if (ret) + break; + + for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (index < max_naddr) + nfilters++; + } + + offset += fw_naddr; + rem -= fw_naddr; + } + + if (ret == 0) + ret = nfilters; + return ret; +} + /** * t4_change_mac - modifies the exact-match filter for a MAC address * @adap: the adapter -- cgit v1.2.3 From fe5d2709b09a38ac171df1438b88769a0767b8b2 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 16 Feb 2016 10:07:10 +0530 Subject: cxgb4vf: Use __dev_uc_sync/__dev_mc_sync to sync MAC address Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8 ++ .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 116 +++++++++++++-------- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 20 ++++ drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 88 +++++++++++++--- 4 files changed, 171 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h index 6049f70e110c..4a707c32d76f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h @@ -348,6 +348,11 @@ struct sge { #define for_each_ethrxq(sge, iter) \ for (iter = 0; iter < (sge)->ethqsets; iter++) +struct hash_mac_addr { + struct list_head list; + u8 addr[ETH_ALEN]; +}; + /* * Per-"adapter" (Virtual Function) information. */ @@ -381,6 +386,9 @@ struct adapter { /* various locks */ spinlock_t stats_lock; + + /* list of MAC addresses in MPS Hash */ + struct list_head mac_hlist; }; enum { /* adapter flags */ diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 0cfa5d72cafd..8337514ababb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -741,6 +741,9 @@ static int adapter_up(struct adapter *adapter) */ enable_rx(adapter); t4vf_sge_start(adapter); + + /* Initialize hash mac addr list*/ + INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } @@ -905,51 +908,74 @@ static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device return naddr; } -/* - * Configure the exact and hash address filters to handle a port's multicast - * and secondary unicast MAC addresses. 
- */ -static int set_addr_filters(const struct net_device *dev, bool sleep) +static inline int cxgb4vf_set_addr_hash(struct port_info *pi) { - u64 mhash = 0; - u64 uhash = 0; - bool free = true; - unsigned int offset, naddr; - const u8 *addr[7]; - int ret; - const struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + u64 vec = 0; + bool ucast = false; + struct hash_mac_addr *entry; - /* first do the secondary unicast addresses */ - for (offset = 0; ; offset += naddr) { - naddr = collect_netdev_uc_list_addrs(dev, addr, offset, - ARRAY_SIZE(addr)); - if (naddr == 0) - break; + /* Calculate the hash vector for the updated list and program it */ + list_for_each_entry(entry, &adapter->mac_hlist, list) { + ucast |= is_unicast_ether_addr(entry->addr); + vec |= (1ULL << hash_mac_addr(entry->addr)); + } + return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false); +} - ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, NULL, &uhash, sleep); - if (ret < 0) - return ret; +static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + int ret; + u64 mhash = 0; + u64 uhash = 0; + bool free = false; + bool ucast = is_unicast_ether_addr(mac_addr); + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *new_entry; - free = false; + ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist, + NULL, ucast ? &uhash : &mhash, false); + if (ret < 0) + goto out; + /* if hash != 0, then add the addr to hash addr list + * so on the end we will calculate the hash for the + * list and program it + */ + if (uhash || mhash) { + new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); + if (!new_entry) + return -ENOMEM; + ether_addr_copy(new_entry->addr, mac_addr); + list_add_tail(&new_entry->list, &adapter->mac_hlist); + ret = cxgb4vf_set_addr_hash(pi); } +out: + return ret < 0 ? ret : 0; +} - /* next set up the multicast addresses */ - for (offset = 0; ; offset += naddr) { - naddr = collect_netdev_mc_list_addrs(dev, addr, offset, - ARRAY_SIZE(addr)); - if (naddr == 0) - break; +static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr) +{ + struct port_info *pi = netdev_priv(netdev); + struct adapter *adapter = pi->adapter; + int ret; + const u8 *maclist[1] = {mac_addr}; + struct hash_mac_addr *entry, *tmp; - ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, - naddr, addr, NULL, &mhash, sleep); - if (ret < 0) - return ret; - free = false; + /* If the MAC address to be removed is in the hash addr + * list, delete it from the list and update hash vector + */ + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) { + if (ether_addr_equal(entry->addr, mac_addr)) { + list_del(&entry->list); + kfree(entry); + return cxgb4vf_set_addr_hash(pi); + } } - return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, - uhash | mhash, sleep); + ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false); + return ret < 0 ? 
-EINVAL : 0; } /* @@ -958,16 +984,18 @@ static int set_addr_filters(const struct net_device *dev, bool sleep) */ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) { - int ret; struct port_info *pi = netdev_priv(dev); - ret = set_addr_filters(dev, sleep_ok); - if (ret == 0) - ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1, - (dev->flags & IFF_PROMISC) != 0, - (dev->flags & IFF_ALLMULTI) != 0, - 1, -1, sleep_ok); - return ret; + if (!(dev->flags & IFF_PROMISC)) { + __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync); + if (!(dev->flags & IFF_ALLMULTI)) + __dev_mc_sync(dev, cxgb4vf_mac_sync, + cxgb4vf_mac_unsync); + } + return t4vf_set_rxmode(pi->adapter, pi->viid, -1, + (dev->flags & IFF_PROMISC) != 0, + (dev->flags & IFF_ALLMULTI) != 0, + 1, -1, sleep_ok); } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 88b8981b4751..6ce302fe1a61 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -285,6 +285,24 @@ static inline int is_t4(enum chip_type chip) return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4; } +/** + * hash_mac_addr - return the hash value of a MAC address + * @addr: the 48-bit Ethernet MAC address + * + * Hashes a MAC address according to the hash function used by hardware + * inexact (hash) address matching. + */ +static inline int hash_mac_addr(const u8 *addr) +{ + u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; + u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; + + a ^= b; + a ^= (a >> 12); + a ^= (a >> 6); + return a & 0x3f; +} + int t4vf_wait_dev_ready(struct adapter *); int t4vf_port_init(struct adapter *, int); @@ -320,6 +338,8 @@ int t4vf_set_rxmode(struct adapter *, unsigned int, int, int, int, int, int, bool); int t4vf_alloc_mac_filt(struct adapter *, unsigned int, bool, unsigned int, const u8 **, u16 *, u64 *, bool); +int t4vf_free_mac_filt(struct adapter *, unsigned int, unsigned int naddr, + const u8 **, bool); int t4vf_change_mac(struct adapter *, unsigned int, int, const u8 *, bool); int t4vf_set_addr_hash(struct adapter *, unsigned int, bool, u64, bool); int t4vf_get_port_stats(struct adapter *, int, struct t4vf_port_stats *); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index b6fa74aafe47..54220117dcba 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -236,23 +236,6 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, return -ETIMEDOUT; } -/** - * hash_mac_addr - return the hash value of a MAC address - * @addr: the 48-bit Ethernet MAC address - * - * Hashes a MAC address according to the hash function used by hardware - * inexact (hash) address matching. 
- */ -static int hash_mac_addr(const u8 *addr) -{ - u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2]; - u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5]; - a ^= b; - a ^= (a >> 12); - a ^= (a >> 6); - return a & 0x3f; -} - #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG) @@ -1265,6 +1248,77 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free, return ret; } +/** + * t4vf_free_mac_filt - frees exact-match filters of given MAC addresses + * @adapter: the adapter + * @viid: the VI id + * @naddr: the number of MAC addresses to allocate filters for (up to 7) + * @addr: the MAC address(es) + * @sleep_ok: call is allowed to sleep + * + * Frees the exact-match filter for each of the supplied addresses + * + * Returns a negative error number or the number of filters freed. + */ +int t4vf_free_mac_filt(struct adapter *adapter, unsigned int viid, + unsigned int naddr, const u8 **addr, bool sleep_ok) +{ + int offset, ret = 0; + struct fw_vi_mac_cmd cmd; + unsigned int nfilters = 0; + unsigned int max_naddr = adapter->params.arch.mps_tcam_size; + unsigned int rem = naddr; + + if (naddr > max_naddr) + return -EINVAL; + + for (offset = 0; offset < (int)naddr ; /**/) { + unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact) ? + rem : ARRAY_SIZE(cmd.u.exact)); + size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, + u.exact[fw_naddr]), 16); + struct fw_vi_mac_exact *p; + int i; + + memset(&cmd, 0, sizeof(cmd)); + cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | + FW_CMD_REQUEST_F | + FW_CMD_WRITE_F | + FW_CMD_EXEC_V(0) | + FW_VI_MAC_CMD_VIID_V(viid)); + cmd.freemacs_to_len16 = + cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) | + FW_CMD_LEN16_V(len16)); + + for (i = 0, p = cmd.u.exact; i < (int)fw_naddr; i++, p++) { + p->valid_to_idx = cpu_to_be16( + FW_VI_MAC_CMD_VALID_F | + FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE)); + memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr)); + } + + ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &cmd, + sleep_ok); + if (ret) + break; + + for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) { + u16 index = FW_VI_MAC_CMD_IDX_G( + be16_to_cpu(p->valid_to_idx)); + + if (index < max_naddr) + nfilters++; + } + + offset += fw_naddr; + rem -= fw_naddr; + } + + if (ret == 0) + ret = nfilters; + return ret; +} + /** * t4vf_change_mac - modifies the exact-match filter for a MAC address * @adapter: the adapter -- cgit v1.2.3 From 1e9f12ec92ab7307ac7386924e343905f7f12205 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 18 Feb 2016 11:22:49 +0100 Subject: geneve: implement geneve_get_sk_family helper Similarly to the existing vxlan_get_sk_family. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6f208132a574..f09de1e30955 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -110,6 +110,11 @@ static __be64 vni_to_tunnel_id(const __u8 *vni) #endif } +static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) +{ + return gs->sock->sk->sk_family; +} + static struct geneve_dev *geneve_lookup(struct geneve_sock *gs, __be32 addr, u8 vni[]) { @@ -165,16 +170,13 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) static u8 zero_vni[3]; u8 *vni; int err = 0; - sa_family_t sa_family; #if IS_ENABLED(CONFIG_IPV6) struct ipv6hdr *ip6h = NULL; struct in6_addr addr6; static struct in6_addr zero_addr6; #endif - sa_family = gs->sock->sk->sk_family; - - if (sa_family == AF_INET) { + if (geneve_get_sk_family(gs) == AF_INET) { iph = ip_hdr(skb); /* outer IP header... */ if (gs->collect_md) { @@ -188,7 +190,7 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) geneve = geneve_lookup(gs, addr, vni); #if IS_ENABLED(CONFIG_IPV6) - } else if (sa_family == AF_INET6) { + } else if (geneve_get_sk_family(gs) == AF_INET6) { ip6h = ipv6_hdr(skb); /* outer IPv6 header... */ if (gs->collect_md) { @@ -213,7 +215,7 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) (gnvh->oam ? TUNNEL_OAM : 0) | (gnvh->critical ? TUNNEL_CRIT_OPT : 0); - tun_dst = udp_tun_rx_dst(skb, sa_family, flags, + tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags, vni_to_tunnel_id(gnvh->vni), gnvh->opt_len * 4); if (!tun_dst) @@ -392,7 +394,7 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs) struct net_device *dev; struct sock *sk = gs->sock->sk; struct net *net = sock_net(sk); - sa_family_t sa_family = sk->sk_family; + sa_family_t sa_family = geneve_get_sk_family(gs); __be16 port = inet_sk(sk)->inet_sport; int err; @@ -553,7 +555,7 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs) struct net_device *dev; struct sock *sk = gs->sock->sk; struct net *net = sock_net(sk); - sa_family_t sa_family = sk->sk_family; + sa_family_t sa_family = geneve_get_sk_family(gs); __be16 port = inet_sk(sk)->inet_sport; rcu_read_lock(); @@ -596,7 +598,7 @@ static struct geneve_sock *geneve_find_sock(struct geneve_net *gn, list_for_each_entry(gs, &gn->sock_list, list) { if (inet_sk(gs->sock->sk)->inet_sport == dst_port && - inet_sk(gs->sock->sk)->sk.sk_family == family) { + geneve_get_sk_family(gs) == family) { return gs; } } -- cgit v1.2.3 From 9fc4754582bf46b0998a64fce74f570cea720e18 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 18 Feb 2016 11:22:50 +0100 Subject: geneve: move geneve device lookup before iptunnel_pull_header This is in preparation for iptunnel_pull_header calling skb_scrub_packet. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 76 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index f09de1e30955..4ceccf871b3f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -158,55 +158,60 @@ static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb) return (struct genevehdr *)(udp_hdr(skb) + 1); } -/* geneve receive/decap routine */ -static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) +static struct geneve_dev *geneve_lookup_skb(struct geneve_sock *gs, + struct sk_buff *skb) { - struct genevehdr *gnvh = geneve_hdr(skb); - struct metadata_dst *tun_dst = NULL; - struct geneve_dev *geneve = NULL; - struct pcpu_sw_netstats *stats; - struct iphdr *iph = NULL; + u8 *vni; __be32 addr; static u8 zero_vni[3]; - u8 *vni; - int err = 0; #if IS_ENABLED(CONFIG_IPV6) - struct ipv6hdr *ip6h = NULL; - struct in6_addr addr6; static struct in6_addr zero_addr6; #endif if (geneve_get_sk_family(gs) == AF_INET) { + struct iphdr *iph; + iph = ip_hdr(skb); /* outer IP header... */ if (gs->collect_md) { vni = zero_vni; addr = 0; } else { - vni = gnvh->vni; - + vni = geneve_hdr(skb)->vni; addr = iph->saddr; } - geneve = geneve_lookup(gs, addr, vni); + return geneve_lookup(gs, addr, vni); #if IS_ENABLED(CONFIG_IPV6) } else if (geneve_get_sk_family(gs) == AF_INET6) { + struct ipv6hdr *ip6h; + struct in6_addr addr6; + ip6h = ipv6_hdr(skb); /* outer IPv6 header... */ if (gs->collect_md) { vni = zero_vni; addr6 = zero_addr6; } else { - vni = gnvh->vni; - + vni = geneve_hdr(skb)->vni; addr6 = ip6h->saddr; } - geneve = geneve6_lookup(gs, addr6, vni); + return geneve6_lookup(gs, addr6, vni); #endif } - if (!geneve) - goto drop; + return NULL; +} + +/* geneve receive/decap routine */ +static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, + struct sk_buff *skb) +{ + struct genevehdr *gnvh = geneve_hdr(skb); + struct metadata_dst *tun_dst = NULL; + struct pcpu_sw_netstats *stats; + int err = 0; + void *oiph; if (ip_tunnel_collect_metadata() || gs->collect_md) { __be16 flags; @@ -243,25 +248,27 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb) if (ether_addr_equal(eth_hdr(skb)->h_source, geneve->dev->dev_addr)) goto drop; + oiph = skb_network_header(skb); skb_reset_network_header(skb); - if (iph) - err = IP_ECN_decapsulate(iph, skb); + if (geneve_get_sk_family(gs) == AF_INET) + err = IP_ECN_decapsulate(oiph, skb); #if IS_ENABLED(CONFIG_IPV6) - if (ip6h) - err = IP6_ECN_decapsulate(ip6h, skb); + else + err = IP6_ECN_decapsulate(oiph, skb); #endif if (unlikely(err)) { if (log_ecn_error) { - if (iph) + if (geneve_get_sk_family(gs) == AF_INET) net_info_ratelimited("non-ECT from %pI4 " "with TOS=%#x\n", - &iph->saddr, iph->tos); + &((struct iphdr *)oiph)->saddr, + ((struct iphdr *)oiph)->tos); #if IS_ENABLED(CONFIG_IPV6) - if (ip6h) + else net_info_ratelimited("non-ECT from %pI6\n", - &ip6h->saddr); + &((struct ipv6hdr *)oiph)->saddr); #endif } if (err > 1) { @@ -323,6 +330,7 @@ static void geneve_uninit(struct net_device *dev) static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct genevehdr *geneveh; + struct geneve_dev *geneve; struct geneve_sock *gs; int opts_len; @@ -338,16 +346,20 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (unlikely(geneveh->proto_type != htons(ETH_P_TEB))) goto error; + gs = rcu_dereference_sk_user_data(sk); + if (!gs) + goto drop; + + geneve = 
geneve_lookup_skb(gs, skb); + if (!geneve) + goto drop; + opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, htons(ETH_P_TEB))) goto drop; - gs = rcu_dereference_sk_user_data(sk); - if (!gs) - goto drop; - - geneve_rx(gs, skb); + geneve_rx(geneve, gs, skb); return 0; drop: -- cgit v1.2.3 From c9e78efb6f668c42e0e0f47398e814b758b95336 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 18 Feb 2016 11:22:51 +0100 Subject: vxlan: move vxlan device lookup before iptunnel_pull_header This is in preparation for iptunnel_pull_header calling skb_scrub_packet. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 75bccb360599..16a176cd0dad 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1187,22 +1187,16 @@ out: unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } -static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, - struct vxlan_metadata *md, __be32 vni, +static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, + struct sk_buff *skb, struct vxlan_metadata *md, struct metadata_dst *tun_dst) { struct iphdr *oip = NULL; struct ipv6hdr *oip6 = NULL; - struct vxlan_dev *vxlan; struct pcpu_sw_netstats *stats; union vxlan_addr saddr; int err = 0; - /* Is this VNI defined? */ - vxlan = vxlan_vs_find_vni(vs, vni); - if (!vxlan) - goto drop; - skb_reset_mac_header(skb); skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); skb->protocol = eth_type_trans(skb, vxlan->dev); @@ -1281,6 +1275,7 @@ drop: static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; + struct vxlan_dev *vxlan; struct vxlan_sock *vs; struct vxlanhdr unparsed; struct vxlan_metadata _md; @@ -1302,13 +1297,17 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) unparsed.vx_flags &= ~VXLAN_HF_VNI; unparsed.vx_vni &= ~VXLAN_VNI_MASK; - if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) - goto drop; - vs = rcu_dereference_sk_user_data(sk); if (!vs) goto drop; + vxlan = vxlan_vs_find_vni(vs, vxlan_vni(vxlan_hdr(skb)->vx_vni)); + if (!vxlan) + goto drop; + + if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) + goto drop; + if (vxlan_collect_metadata(vs)) { __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); @@ -1344,7 +1343,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; } - vxlan_rcv(vs, skb, md, vxlan_vni(vxlan_hdr(skb)->vx_vni), tun_dst); + vxlan_rcv(vxlan, vs, skb, md, tun_dst); return 0; drop: -- cgit v1.2.3 From 7f290c94352e59b1d720055fce760a69a63bd0a1 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 18 Feb 2016 11:22:52 +0100 Subject: iptunnel: scrub packet in iptunnel_pull_header Part of skb_scrub_packet was open coded in iptunnel_pull_header. Let it call skb_scrub_packet directly instead. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 4 ++-- drivers/net/vxlan.c | 4 ++-- include/net/ip_tunnels.h | 3 ++- net/ipv4/ip_gre.c | 2 +- net/ipv4/ip_tunnel_core.c | 8 +++----- net/ipv4/ipip.c | 2 +- net/ipv6/sit.c | 2 +- 7 files changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4ceccf871b3f..dfbe3ca687f7 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -237,7 +237,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs, } skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(geneve->net, dev_net(geneve->dev))); skb->protocol = eth_type_trans(skb, geneve->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -356,7 +355,8 @@ static int geneve_udp_encap_recv(struct sock *sk, struct sk_buff *skb) opts_len = geneveh->opt_len * 4; if (iptunnel_pull_header(skb, GENEVE_BASE_HLEN + opts_len, - htons(ETH_P_TEB))) + htons(ETH_P_TEB), + !net_eq(geneve->net, dev_net(geneve->dev)))) goto drop; geneve_rx(geneve, gs, skb); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 16a176cd0dad..c963897e713d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1198,7 +1198,6 @@ static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, int err = 0; skb_reset_mac_header(skb); - skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev))); skb->protocol = eth_type_trans(skb, vxlan->dev); skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); @@ -1305,7 +1304,8 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!vxlan) goto drop; - if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB))) + if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB), + !net_eq(vxlan->net, dev_net(vxlan->dev)))) goto drop; if (vxlan_collect_metadata(vs)) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 87408ab80856..4dd616376fec 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -270,7 +270,8 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, return INET_ECN_encapsulate(tos, inner); } -int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, + bool xnet); void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, __be32 src, __be32 dst, u8 proto, u8 tos, u8 ttl, __be16 df, bool xnet); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 917c2c1bfadd..12071e28d958 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -238,7 +238,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi, return -EINVAL; } } - return iptunnel_pull_header(skb, hdr_len, tpi->proto); + return iptunnel_pull_header(skb, hdr_len, tpi->proto, false); } static void ipgre_err(struct sk_buff *skb, u32 info, diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index a6e58b6141cd..eaca2449a09a 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -86,7 +86,8 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, } EXPORT_SYMBOL_GPL(iptunnel_xmit); -int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) +int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto, + bool xnet) { if (unlikely(!pskb_may_pull(skb, hdr_len))) return -ENOMEM; @@ -109,13 +110,10 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) skb->protocol = inner_proto; } - nf_reset(skb); - 
secpath_reset(skb); skb_clear_hash_if_not_l4(skb); - skb_dst_drop(skb); skb->vlan_tci = 0; skb_set_queue_mapping(skb, 0); - skb->pkt_type = PACKET_HOST; + skb_scrub_packet(skb, xnet); return 0; } EXPORT_SYMBOL_GPL(iptunnel_pull_header); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 6ec5b42fd172..ec51d02166de 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -195,7 +195,7 @@ static int ipip_rcv(struct sk_buff *skb) if (tunnel) { if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; - if (iptunnel_pull_header(skb, 0, tpi.proto)) + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) goto drop; return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 0625ac6356b5..f45b8ffc2840 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -740,7 +740,7 @@ static int ipip_rcv(struct sk_buff *skb) if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto drop; - if (iptunnel_pull_header(skb, 0, tpi.proto)) + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) goto drop; return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, log_ecn_error); } -- cgit v1.2.3 From e12a285c9beccbdbb0346cce847dcd779096be5f Mon Sep 17 00:00:00 2001 From: "Andrew F. Davis" Date: Wed, 17 Feb 2016 18:10:00 -0600 Subject: net: phy: dp83848: Fix sysfs naming collision warning Files in sysfs are created using the name from the phy_driver struct, when two names are the same we may get a duplicate filename warning, fix this. Reported-by: kernel test robot Signed-off-by: Andrew F. Davis Signed-off-by: David S. Miller --- drivers/net/phy/dp83848.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 556904f572d6..03d54c4adc88 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -103,7 +103,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); static struct phy_driver dp83848_driver[] = { DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), - DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"), + DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"), DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"), }; module_phy_driver(dp83848_driver); -- cgit v1.2.3 From 3f9b4a6972d50562613daa649ed064244e6bc7bb Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Thu, 18 Feb 2016 17:00:39 +0200 Subject: qed: Lay infrastructure for vlan filtering offload Today, interfaces are working in vlan-promisc mode; But once vlan filtering offloaded would be supported, we'll need a method to control it directly [e.g., when setting device to PROMISC, or when running out of vlan credits]. This adds the necessary API for L2 client to manually choose whether to accept all vlans or only those for which filters were configured. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 23 +++++++++++++++++++---- include/linux/qed/qed_eth_if.h | 2 ++ 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 978d07a61bbf..73feaf7eedb8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -124,6 +124,8 @@ struct qed_sp_vport_update_params { u8 update_vport_active_tx_flg; u8 vport_active_tx_flg; u8 update_approx_mcast_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; unsigned long bins[8]; struct qed_rss_params *rss_params; struct qed_filter_accept_flags accept_flags; @@ -393,7 +395,9 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg; p_cmn->tx_active_flg = p_params->vport_active_tx_flg; p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg; - + p_cmn->accept_any_vlan = p_params->accept_any_vlan; + p_cmn->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); if (rc) { /* Return spq entry which is taken in qed_sp_init_request()*/ @@ -444,8 +448,10 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, static int qed_filter_accept_cmd(struct qed_dev *cdev, u8 vport, struct qed_filter_accept_flags accept_flags, - enum spq_mode comp_mode, - struct qed_spq_comp_cb *p_comp_data) + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct qed_spq_comp_cb *p_comp_data) { struct qed_sp_vport_update_params vport_update_params; int i, rc; @@ -454,6 +460,8 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, memset(&vport_update_params, 0, sizeof(vport_update_params)); vport_update_params.vport_id = vport; vport_update_params.accept_flags = accept_flags; + vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan; + vport_update_params.accept_any_vlan = accept_any_vlan; for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; @@ -471,6 +479,10 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev, "Accept filter configured, flags = [Rx]%x [Tx]%x\n", accept_flags.rx_accept_filter, accept_flags.tx_accept_filter); + if (update_accept_any_vlan) + DP_VERBOSE(p_hwfn, QED_MSG_SP, + "accept_any_vlan=%d configured\n", + accept_any_vlan); } return 0; @@ -1347,6 +1359,9 @@ static int qed_update_vport(struct qed_dev *cdev, params->update_vport_active_flg; sp_params.vport_active_rx_flg = params->vport_active_flg; sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.accept_any_vlan = params->accept_any_vlan; + sp_params.update_accept_any_vlan_flg = + params->update_accept_any_vlan_flg; /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. * We need to re-fix the rss values per engine for CMT. 
@@ -1566,7 +1581,7 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; - return qed_filter_accept_cmd(cdev, 0, accept_flags, + return qed_filter_accept_cmd(cdev, 0, accept_flags, false, false, QED_SPQ_MODE_CB, NULL); } diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index 81ab178e31c1..e53b0ca49e41 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -33,6 +33,8 @@ struct qed_update_vport_params { u8 vport_id; u8 update_vport_active_flg; u8 vport_active_flg; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; u8 update_rss_flg; struct qed_update_vport_rss_params rss_params; }; -- cgit v1.2.3 From 7c1bfcad9f3c8a7400c2dfaf67edc7a46e271c51 Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Thu, 18 Feb 2016 17:00:40 +0200 Subject: qede: Add vlan filtering offload support Device would start receiving only vlan-tagged traffic with tags matching that of one of the configured vlan IDs, unless: - Device is expliicly placed in PROMISC mode. - Device exhausts its vlan filter credits. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 10 + drivers/net/ethernet/qlogic/qede/qede_main.c | 277 ++++++++++++++++++++++++++- 2 files changed, 286 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index f75d9e0676ce..15c5528b4f39 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -100,6 +100,12 @@ struct qede_stats { u64 tx_mac_ctrl_frames; }; +struct qede_vlan { + struct list_head list; + u16 vid; + bool configured; +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -154,6 +160,10 @@ struct qede_dev { u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ + struct list_head vlan_list; + u16 configured_vlans; + u16 non_configured_vlans; + bool accept_any_vlan; struct delayed_work sp_task; unsigned long sp_flags; }; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f50e0bd7fb2c..5f15e23a0f7d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1140,6 +1140,21 @@ static int qede_set_ucast_rx_mac(struct qede_dev *edev, return edev->ops->filter_config(edev->cdev, &filter_cmd); } +static int qede_set_ucast_rx_vlan(struct qede_dev *edev, + enum qed_filter_xcast_params_type opcode, + u16 vid) +{ + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return edev->ops->filter_config(edev->cdev, &filter_cmd); +} + void qede_fill_by_demand_stats(struct qede_dev *edev) { struct qed_eth_stats stats; @@ -1252,6 +1267,247 @@ static struct rtnl_link_stats64 *qede_get_stats64( return stats; } +static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action) +{ + struct qed_update_vport_params params; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (edev->accept_any_vlan == action) + return; + + memset(¶ms, 0, sizeof(params)); + + params.vport_id = 0; + 
params.accept_any_vlan = action; + params.update_accept_any_vlan_flg = 1; + + rc = edev->ops->vport_update(edev->cdev, ¶ms); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? "enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? "enabled" : "disabled"); + edev->accept_any_vlan = action; + } +} + +static int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan, *tmp; + int rc; + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan 0x%04x\n", vid); + + vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); + if (!vlan) { + DP_INFO(edev, "Failed to allocate struct for vlan\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&vlan->list); + vlan->vid = vid; + vlan->configured = false; + + /* Verify vlan isn't already configured */ + list_for_each_entry(tmp, &edev->vlan_list, list) { + if (tmp->vid == vlan->vid) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "vlan already configured\n"); + kfree(vlan); + return -EEXIST; + } + } + + /* If interface is down, cache this VLAN ID and return */ + if (edev->state != QEDE_STATE_OPEN) { + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, VLAN %d will be configured when interface is up\n", + vid); + if (vid != 0) + edev->non_configured_vlans++; + list_add(&vlan->list, &edev->vlan_list); + + return 0; + } + + /* Check for the filter limit. + * Note - vlan0 has a reserved filter and can be added without + * worrying about quota + */ + if ((edev->configured_vlans < edev->dev_info.num_vlan_filters) || + (vlan->vid == 0)) { + rc = qede_set_ucast_rx_vlan(edev, + QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %d\n", + vlan->vid); + kfree(vlan); + return -EINVAL; + } + vlan->configured = true; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) + edev->configured_vlans++; + } else { + /* Out of quota; Activate accept-any-VLAN mode */ + if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, true); + + edev->non_configured_vlans++; + } + + list_add(&vlan->list, &edev->vlan_list); + + return 0; +} + +static void qede_del_vlan_from_list(struct qede_dev *edev, + struct qede_vlan *vlan) +{ + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + if (vlan->configured) + edev->configured_vlans--; + else + edev->non_configured_vlans--; + } + + list_del(&vlan->list); + kfree(vlan); +} + +static int qede_configure_vlan_filters(struct qede_dev *edev) +{ + int rc = 0, real_rc = 0, accept_any_vlan = 0; + struct qed_dev_eth_info *dev_info; + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return 0; + + dev_info = &edev->dev_info; + + /* Configure non-configured vlans */ + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (vlan->configured) + continue; + + /* We have used all our credits, now enable accept_any_vlan */ + if ((vlan->vid != 0) && + (edev->configured_vlans == dev_info->num_vlan_filters)) { + accept_any_vlan = 1; + continue; + } + + DP_VERBOSE(edev, NETIF_MSG_IFUP, "Adding vlan %d\n", vlan->vid); + + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_ADD, + vlan->vid); + if (rc) { + DP_ERR(edev, "Failed to configure VLAN %u\n", + vlan->vid); + real_rc = rc; + continue; + } + + vlan->configured = true; + /* vlan0 filter doesn't consume our VLAN filter's quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans--; + edev->configured_vlans++; + } + } + + /* enable accept_any_vlan mode if we 
have more VLANs than credits, + * or remove accept_any_vlan mode if we've actually removed + * a non-configured vlan, and all remaining vlans are truly configured. + */ + + if (accept_any_vlan) + qede_config_accept_any_vlan(edev, true); + else if (!edev->non_configured_vlans) + qede_config_accept_any_vlan(edev, false); + + return real_rc; +} + +static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct qede_dev *edev = netdev_priv(dev); + struct qede_vlan *vlan = NULL; + int rc; + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, "Removing vlan 0x%04x\n", vid); + + /* Find whether entry exists */ + list_for_each_entry(vlan, &edev->vlan_list, list) + if (vlan->vid == vid) + break; + + if (!vlan || (vlan->vid != vid)) { + DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN), + "Vlan isn't configured\n"); + return 0; + } + + if (edev->state != QEDE_STATE_OPEN) { + /* As interface is already down, we don't have a VPORT + * instance to remove vlan filter. So just update vlan list + */ + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "Interface is down, removing VLAN from list only\n"); + qede_del_vlan_from_list(edev, vlan); + return 0; + } + + /* Remove vlan */ + rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid); + if (rc) { + DP_ERR(edev, "Failed to remove VLAN %d\n", vid); + return -EINVAL; + } + + qede_del_vlan_from_list(edev, vlan); + + /* We have removed a VLAN - try to see if we can + * configure non-configured VLAN from the list. + */ + rc = qede_configure_vlan_filters(edev); + + return rc; +} + +static void qede_vlan_mark_nonconfigured(struct qede_dev *edev) +{ + struct qede_vlan *vlan = NULL; + + if (list_empty(&edev->vlan_list)) + return; + + list_for_each_entry(vlan, &edev->vlan_list, list) { + if (!vlan->configured) + continue; + + vlan->configured = false; + + /* vlan0 filter isn't consuming out of our quota */ + if (vlan->vid != 0) { + edev->non_configured_vlans++; + edev->configured_vlans--; + } + + DP_VERBOSE(edev, NETIF_MSG_IFDOWN, + "marked vlan %d as non-configured\n", + vlan->vid); + } + + edev->accept_any_vlan = false; +} + static const struct net_device_ops qede_netdev_ops = { .ndo_open = qede_open, .ndo_stop = qede_close, @@ -1260,6 +1516,8 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, + .ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid, .ndo_get_stats64 = qede_get_stats64, }; @@ -1304,6 +1562,8 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev, edev->num_tc = edev->dev_info.num_tc; + INIT_LIST_HEAD(&edev->vlan_list); + return edev; } @@ -1335,7 +1595,7 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_HIGHDMA; ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA | - NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX; ndev->hw_features = hw_features; @@ -2342,6 +2602,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode) DP_INFO(edev, "Stopped Queues\n"); + qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); /* Release the interrupts */ @@ -2410,6 +2671,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode) edev->state = QEDE_STATE_OPEN; mutex_unlock(&edev->qede_lock); + /* Program un-configured VLANs */ + qede_configure_vlan_filters(edev); + /* Ask for link-up using current configuration */ 
memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; @@ -2670,6 +2934,17 @@ static void qede_config_rx_mode(struct net_device *ndev) goto out; } + /* take care of VLAN mode */ + if (ndev->flags & IFF_PROMISC) { + qede_config_accept_any_vlan(edev, true); + } else if (!edev->non_configured_vlans) { + /* It's possible that accept_any_vlan mode is set due to a + * previous setting of IFF_PROMISC. If vlan credits are + * sufficient, disable accept_any_vlan. + */ + qede_config_accept_any_vlan(edev, false); + } + rx_mode.filter.accept_flags = accept_flags; edev->ops->filter_config(edev->cdev, &rx_mode); out: -- cgit v1.2.3 From fad57330b6d0710fdf39dc1c2b28ccebb97ae8a1 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:22 -0800 Subject: i40e/i40evf: Clean-up Rx packet checksum handling This is mostly a minor clean-up for the Rx checksum path in order to avoid some of the unnecessary conditional checks that were being applied. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 23 ++++++++++------------- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 23 ++++++++++------------- 2 files changed, 20 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 6b08b0fa6dcf..ded73c021510 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1389,13 +1389,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4 = false, ipv6 = false; - bool ipv4_tunnel, ipv6_tunnel; - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; @@ -1411,12 +1405,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (!(decoded.known && decoded.outer_ip)) return; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) - ipv4 = true; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) - ipv6 = true; + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -1447,6 +1439,11 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * inner checksum report CHECKSUM_UNNECESSARY. 
*/ + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 143c5703f54e..3f40e0e26066 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -861,13 +861,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, u16 rx_ptype) { struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype); - bool ipv4 = false, ipv6 = false; - bool ipv4_tunnel, ipv6_tunnel; - - ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); - ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && - (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel; skb->ip_summed = CHECKSUM_NONE; @@ -883,12 +877,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, if (!(decoded.known && decoded.outer_ip)) return; - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4) - ipv4 = true; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6) - ipv6 = true; + ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); + ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && + (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -919,6 +911,11 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * inner checksum report CHECKSUM_UNNECESSARY. */ + ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4); + ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) && + (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4); + skb->ip_summed = CHECKSUM_UNNECESSARY; skb->csum_level = ipv4_tunnel || ipv6_tunnel; -- cgit v1.2.3 From 5453205cd0975b845f6f8837f0c2b7c8cb80fcf8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:29 -0800 Subject: i40e/i40evf: Enable support for SKB_GSO_UDP_TUNNEL_CSUM The XL722 has support for providing the outer UDP tunnel checksum on transmits. Make use of this feature to support segmenting UDP tunnels with outer checksums enabled. 
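The "remove payload length from outer checksum" step in the hunks below is terse one's-complement arithmetic, so here is a rough standalone illustration of the same idea using plain integer types and made-up values; skb_len and l4_offset stand in for the real lengths, and this is a sketch of the arithmetic, not the driver code:

	unsigned int sum;
	unsigned short udp_check = 0x1234;            /* made-up outer UDP checksum field */
	unsigned short paylen = skb_len - l4_offset;  /* full, unsegmented payload length */

	/* udp_check is the complement of a sum that already covers paylen.
	 * Subtracting in one's-complement arithmetic means adding the
	 * complement of the value and folding the carry back into 16 bits.
	 */
	sum  = (unsigned short)~udp_check;            /* recover the raw sum           */
	sum += (unsigned short)~paylen;               /* "subtract" the payload length */
	sum  = (sum & 0xffff) + (sum >> 16);          /* fold the carry                */
	udp_check = ~sum;                             /* back to checksum field form   */

With the full payload length backed out, the hardware can add each segment's own length when it recomputes the outer UDP checksum per segment.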
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 19 ++++++++++++++++++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 19 ++++++++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index ded73c021510..1955c849a452 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2272,6 +2272,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, } ip; union { struct tcphdr *tcp; + struct udphdr *udp; unsigned char *hdr; } l4; u32 paylen, l4_offset; @@ -2298,7 +2299,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE)) { + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = (__force u16)l4.udp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.udp->check = ~csum_fold((__force __wsum)paylen); + } + /* reset pointers to inner headers */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); @@ -2460,6 +2472,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + /* record tunnel offload values */ *cd_tunneling |= tunnel; diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 3f40e0e26066..6d66fcdc6122 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1532,6 +1532,7 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, } ip; union { struct tcphdr *tcp; + struct udphdr *udp; unsigned char *hdr; } l4; u32 paylen, l4_offset; @@ -1558,7 +1559,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, ip.v6->payload_len = 0; } - if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE)) { + if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE | + SKB_GSO_UDP_TUNNEL_CSUM)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) { + /* determine offset of outer transport header */ + l4_offset = l4.hdr - skb->data; + + /* remove payload length from outer checksum */ + paylen = (__force u16)l4.udp->check; + paylen += ntohs(1) * (u16)~(skb->len - l4_offset); + l4.udp->check = ~csum_fold((__force __wsum)paylen); + } + /* reset pointers to inner headers */ ip.hdr = skb_inner_network_header(skb); l4.hdr = skb_inner_transport_header(skb); @@ -1678,6 +1690,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, tunnel |= ((ip.hdr - l4.hdr) / 2) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; + /* indicate if we need to offload outer UDP header */ + if ((*tx_flags & I40E_TX_FLAGS_TSO) && + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) + tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK; + /* record tunnel offload values */ *cd_tunneling |= tunnel; -- cgit v1.2.3 From 
6b037cd465ff6e5f6b96524658f47d59d1acc554 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:36 -0800 Subject: i40e: Fix ATR in relation to tunnels This patch contains a number of fixes to make certain that we are using the correct protocols when parsing both the inner and outer headers of a frame that is mixed between IPv4 and IPv6 for inner and outer. Signed-off-by: Alexander Duyck Acked-by: Kiran Patil Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 28 +++++++++++----------------- 1 file changed, 11 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1955c849a452..159fb6eed375 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2030,10 +2030,9 @@ tx_only: * @tx_ring: ring to add programming descriptor to * @skb: send buffer * @tx_flags: send tx flags - * @protocol: wire protocol **/ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, - u32 tx_flags, __be16 protocol) + u32 tx_flags) { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; @@ -2045,6 +2044,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, struct tcphdr *th; unsigned int hlen; u32 flex_ptype, dtype_cmd; + u8 l4_proto; u16 i; /* make sure ATR is enabled */ @@ -2058,6 +2058,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!tx_ring->atr_sample_rate) return; + /* Currently only IPv4/IPv6 with TCP is supported */ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; @@ -2065,29 +2066,22 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, /* snag network header to get L4 type and address */ hdr.network = skb_network_header(skb); - /* Currently only IPv4/IPv6 with TCP is supported - * access ihl as u8 to avoid unaligned access on ia64 - */ + /* access ihl as u8 to avoid unaligned access on ia64 */ if (tx_flags & I40E_TX_FLAGS_IPV4) hlen = (hdr.network[0] & 0x0F) << 2; - else if (protocol == htons(ETH_P_IPV6)) - hlen = sizeof(struct ipv6hdr); else - return; + hlen = sizeof(struct ipv6hdr); } else { hdr.network = skb_inner_network_header(skb); hlen = skb_inner_network_header_len(skb); } - /* Currently only IPv4/IPv6 with TCP is supported - * Note: tx_flags gets modified to reflect inner protocols in + /* Note: tx_flags gets modified to reflect inner protocols in * tx_enable_csum function if encap is enabled. */ - if ((tx_flags & I40E_TX_FLAGS_IPV4) && - (hdr.ipv4->protocol != IPPROTO_TCP)) - return; - else if ((tx_flags & I40E_TX_FLAGS_IPV6) && - (hdr.ipv6->nexthdr != IPPROTO_TCP)) + l4_proto = (tx_flags & I40E_TX_FLAGS_IPV4) ? hdr.ipv4->protocol : + hdr.ipv6->nexthdr; + if (l4_proto != IPPROTO_TCP) return; th = (struct tcphdr *)(hdr.network + hlen); @@ -2124,7 +2118,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & I40E_TXD_FLTR_QW0_QINDEX_MASK; - flex_ptype |= (protocol == htons(ETH_P_IP)) ? + flex_ptype |= (tx_flags & I40E_TX_FLAGS_IPV4) ? (I40E_FILTER_PCTYPE_NONF_IPV4_TCP << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) : (I40E_FILTER_PCTYPE_NONF_IPV6_TCP << @@ -2992,7 +2986,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, * * NOTE: this must always be directly before the data descriptor. 
*/ - i40e_atr(tx_ring, skb, tx_flags, protocol); + i40e_atr(tx_ring, skb, tx_flags); i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, td_cmd, td_offset); -- cgit v1.2.3 From 84d5946d49cf9552d0f1740ad62d0f126cb3b6a9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:43 -0800 Subject: i40e: Do not drop support for IPv6 VXLAN or GENEVE tunnels All of the documentation in the datasheets for the XL710 do not call out any reason to exclude support for IPv6 based tunnels. As such I am dropping the code that was excluding these tunnel types from having their port numbers recognized. This way we can take advantage of things such as checksum offload for inner headers over IPv6 based VXLAN or GENEVE tunnels. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0fa52ed1a896..955dc71d1236 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8626,9 +8626,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, u8 next_idx; u8 idx; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8668,9 +8665,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev, struct i40e_pf *pf = vsi->back; u8 idx; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8707,9 +8701,6 @@ static void i40e_add_geneve_port(struct net_device *netdev, if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) return; - if (sa_family == AF_INET6) - return; - idx = i40e_get_udp_port_idx(pf, port); /* Check if port already exists */ @@ -8751,9 +8742,6 @@ static void i40e_del_geneve_port(struct net_device *netdev, struct i40e_pf *pf = vsi->back; u8 idx; - if (sa_family == AF_INET6) - return; - if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)) return; -- cgit v1.2.3 From bc5d252b363cca63b7ddc1e20dd8b8b242631006 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:50 -0800 Subject: i40e: Update feature flags to reflect newly enabled features Recent changes should have enabled support for IPv6 based tunnels and support for TSO with outer UDP checksums. As such we can update the feature flags to reflect that. In addition we can clean-up the flags that aren't needed such as SCTP and RXCSUM since having the bits there doesn't add any value. 
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 955dc71d1236..2f2b2d714f63 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9032,10 +9032,14 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) np = netdev_priv(netdev); np->vsi = vsi; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | - NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_GSO_GRE | - NETIF_F_TSO | + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM | 0; netdev->features = NETIF_F_SG | @@ -9057,6 +9061,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) netdev->features |= NETIF_F_NTUPLE; + if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; -- cgit v1.2.3 From f608e6a60fc85e4f261daab5e7aac6225e2120d6 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Sun, 24 Jan 2016 21:17:57 -0800 Subject: i40evf: Update feature flags to reflect newly enabled features Recent changes should have enabled support for IPv6 based tunnels and support for TSO with outer UDP checksums. As such we can update the feature flags to reflect that. In addition we can clean-up the flags that aren't needed such as SCTP and RXCSUM since having the bits there doesn't add any value. I also found one spot where we were setting the same flag twice. It looks like it was probably a git merge error that resulted in the line being duplicated. As such I have dropped it in this patch. 
Signed-off-by: Alexander Duyck Acked-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 41369a30dfb8..3396fe32cc6d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2337,9 +2337,24 @@ int i40evf_process_config(struct i40evf_adapter *adapter) NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM | NETIF_F_GRO; + netdev->hw_enc_features |= NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_TSO_ECN | + NETIF_F_GSO_GRE | + NETIF_F_GSO_UDP_TUNNEL | + NETIF_F_GSO_UDP_TUNNEL_CSUM; + + if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE) + netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; netdev->hw_features &= ~NETIF_F_RXCSUM; @@ -2478,6 +2493,10 @@ static void i40evf_init_task(struct work_struct *work) default: goto err_alloc; } + + if (hw->mac.type == I40E_MAC_X722_VF) + adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE; + if (i40evf_process_config(adapter)) goto err_alloc; adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN; @@ -2518,10 +2537,6 @@ static void i40evf_init_task(struct work_struct *work) if (err) goto err_sw_init; i40evf_map_rings_to_vectors(adapter); - if (adapter->vf_res->vf_offload_flags & - I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) - adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; - if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR) adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE; -- cgit v1.2.3 From ffcc55c0c2a85835a4ac080bc1053c3a277b88e2 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Mon, 25 Jan 2016 19:32:54 -0800 Subject: i40e: Add support for ATR w/ IPv6 extension headers This patch updates the code for determining the L4 protocol and L3 header length so that when IPv6 extension headers are being used we can determine the offset and type of the L4 protocol. Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 159fb6eed375..1d3afa7dda18 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2044,7 +2044,7 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, struct tcphdr *th; unsigned int hlen; u32 flex_ptype, dtype_cmd; - u8 l4_proto; + int l4_proto; u16 i; /* make sure ATR is enabled */ @@ -2062,25 +2062,23 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6))) return; - if (!(tx_flags & I40E_TX_FLAGS_UDP_TUNNEL)) { - /* snag network header to get L4 type and address */ - hdr.network = skb_network_header(skb); + /* snag network header to get L4 type and address */ + hdr.network = (tx_flags & I40E_TX_FLAGS_UDP_TUNNEL) ? 
+ skb_inner_network_header(skb) : skb_network_header(skb); + /* Note: tx_flags gets modified to reflect inner protocols in + * tx_enable_csum function if encap is enabled. + */ + if (tx_flags & I40E_TX_FLAGS_IPV4) { /* access ihl as u8 to avoid unaligned access on ia64 */ - if (tx_flags & I40E_TX_FLAGS_IPV4) - hlen = (hdr.network[0] & 0x0F) << 2; - else - hlen = sizeof(struct ipv6hdr); + hlen = (hdr.network[0] & 0x0F) << 2; + l4_proto = hdr.ipv4->protocol; } else { - hdr.network = skb_inner_network_header(skb); - hlen = skb_inner_network_header_len(skb); + hlen = hdr.network - skb->data; + l4_proto = ipv6_find_hdr(skb, &hlen, IPPROTO_TCP, NULL, NULL); + hlen -= hdr.network - skb->data; } - /* Note: tx_flags gets modified to reflect inner protocols in - * tx_enable_csum function if encap is enabled. - */ - l4_proto = (tx_flags & I40E_TX_FLAGS_IPV4) ? hdr.ipv4->protocol : - hdr.ipv6->nexthdr; if (l4_proto != IPPROTO_TCP) return; -- cgit v1.2.3 From 376471a7b6e93067cb8a0ce5e57e8bd6071eebdd Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 17 Feb 2016 13:15:14 +0200 Subject: bnx2x: Add missing HSI for big-endian machines Commit e5d3a51cefbb ("bnx2x: extend DCBx support") was missing HSI changes for big-endian machine, breaking compilation on such platforms. Reported-by: kbuild test robot Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index dd9d6e6100a5..04523d41b873 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1824,10 +1824,12 @@ struct dcbx_app_priority_entry { u8 pri_bitmap; u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 - #define DCBX_APP_ENTRY_SF_MASK 0x30 + #define DCBX_APP_ENTRY_SF_MASK 0xF0 #define DCBX_APP_ENTRY_SF_SHIFT 4 #define DCBX_APP_SF_ETH_TYPE 0x10 #define DCBX_APP_SF_PORT 0x20 + #define DCBX_APP_SF_UDP 0x40 + #define DCBX_APP_SF_DEFAULT 0x80 #elif defined(__LITTLE_ENDIAN) u8 appBitfield; #define DCBX_APP_ENTRY_VALID 0x01 -- cgit v1.2.3 From 4ec441df25a686518fb369086e2b34a1cedaa6c9 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 17 Feb 2016 11:02:43 -0800 Subject: i40e/i40evf: Break up xmit_descriptor_count from maybe_stop_tx In an upcoming patch I would like to have access to the descriptor count used for the data portion of the frame. For this reason I am splitting up the descriptor count function from the function that stops the ring. Also in order to try and reduce unnecessary duplication of code I am moving the slow-path portions of the code out of being inline calls so that we can just jump to them and process them instead of having to build them into each function that calls them. 
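The shape of the split can be sketched with illustrative names (struct tx_ring, descs_unused(), xmit_descriptor_count() and the stats field are placeholders, not the exact driver code): the descriptor count becomes a pure computation, the common-case stop check stays inline, and only the rare "ring looks full" case jumps to an out-of-line helper.

	int __maybe_stop_tx(struct tx_ring *ring, int size);        /* slow path, out of line */

	static inline int maybe_stop_tx(struct tx_ring *ring, int size)
	{
		if (likely(descs_unused(ring) >= size))
			return 0;                                   /* fast path, no call */
		return __maybe_stop_tx(ring, size);
	}

	static netdev_tx_t example_xmit(struct sk_buff *skb, struct tx_ring *ring)
	{
		int count = xmit_descriptor_count(skb);             /* data descriptors only */

		/* +4 gap for the cache line holding head, +1 context descriptor */
		if (maybe_stop_tx(ring, count + 4 + 1)) {
			ring->tx_busy++;
			return NETDEV_TX_BUSY;
		}
		/* ... build and ring the descriptors ... */
		return NETDEV_TX_OK;
	}

Keeping the count as a separate value is what lets the follow-on linearize patch inspect and adjust it before the stop check runs.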
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 14 +++++- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 71 +++++---------------------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 44 ++++++++++++++++- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 64 ++++++------------------ drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 42 ++++++++++++++++ 5 files changed, 123 insertions(+), 112 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 7c66ce416ec7..518d72ea1059 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1359,16 +1359,26 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; struct i40e_tx_buffer *first; u32 tx_flags = 0; + int fso, count; u8 hdr_len = 0; u8 sof = 0; u8 eof = 0; - int fso; if (i40e_fcoe_set_skb_header(skb)) goto out_drop; - if (!i40e_xmit_descriptor_count(skb, tx_ring)) + count = i40e_xmit_descriptor_count(skb); + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1d3afa7dda18..f03657022b0f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2576,7 +2576,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, * * Returns -EBUSY if a stop is needed, else 0 **/ -static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ @@ -2592,24 +2592,6 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) return 0; } -/** - * i40e_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -#ifdef I40E_FCOE -inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#else -static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -#endif -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40e_maybe_stop_tx(tx_ring, size); -} - /** * i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer @@ -2869,43 +2851,6 @@ dma_error: tx_ring->next_to_use = i; } -/** - * i40e_xmit_descriptor_count - calculate number of tx descriptors needed - * @skb: send buffer - * @tx_ring: ring to send buffer on - * - * Returns number of data descriptors needed for this skb. Returns 0 to indicate - * there is not enough descriptors available in this ring since we need at least - * one descriptor. 
- **/ -#ifdef I40E_FCOE -inline int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#else -static inline int i40e_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -#endif -{ - unsigned int f; - int count = 0; - - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 4 desc gap to avoid the cache line where head is, - * + 1 desc for context descriptor, - * otherwise try next time - */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - - count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { - tx_ring->tx_stats.tx_busy++; - return 0; - } - return count; -} - /** * i40e_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer @@ -2924,14 +2869,24 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; + int tso, count; int tsyn; - int tso; /* prefetch the data, we'll need it later */ prefetch(skb->data); - if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) + count = i40e_xmit_descriptor_count(skb); + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* prepare the xmit flags */ if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index fde5f42524fb..5dbc958293f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -331,13 +331,12 @@ int i40e_napi_poll(struct napi_struct *napi, int budget); void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, struct i40e_tx_buffer *first, u32 tx_flags, const u8 hdr_len, u32 td_cmd, u32 td_offset); -int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); -int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring); int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, struct i40e_ring *tx_ring, u32 *flags); #endif void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); +int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); /** * i40e_get_head - Retrieve head from head writeback @@ -352,4 +351,45 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } + +/** + * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
+ **/ +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) +{ + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + int count = 0, size = skb_headlen(skb); + + for (;;) { + count += TXD_USE_COUNT(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +/** + * i40e_maybe_stop_tx - 1st level check for Tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40e_maybe_stop_tx(tx_ring, size); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 6d66fcdc6122..9f38127bdc11 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1858,7 +1858,7 @@ linearize_chk_done: * * Returns -EBUSY if a stop is needed, else 0 **/ -static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) { netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); /* Memory barrier before checking head and tail */ @@ -1874,20 +1874,6 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) return 0; } -/** - * i40evf_maybe_stop_tx - 1st level check for tx stop conditions - * @tx_ring: the ring to be checked - * @size: the size buffer we want to assure is available - * - * Returns 0 if stop is not needed - **/ -static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) -{ - if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) - return 0; - return __i40evf_maybe_stop_tx(tx_ring, size); -} - /** * i40evf_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -2003,7 +1989,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index), first->bytecount); - i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); + i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); /* Algorithm to optimize tail and RS bit setting: * if xmit_more is supported @@ -2085,38 +2071,6 @@ dma_error: tx_ring->next_to_use = i; } -/** - * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed - * @skb: send buffer - * @tx_ring: ring to send buffer on - * - * Returns number of data descriptors needed for this skb. Returns 0 to indicate - * there is not enough descriptors available in this ring since we need at least - * one descriptor. 
- **/ -static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb, - struct i40e_ring *tx_ring) -{ - unsigned int f; - int count = 0; - - /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, - * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, - * + 4 desc gap to avoid the cache line where head is, - * + 1 desc for context descriptor, - * otherwise try next time - */ - for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); - - count += TXD_USE_COUNT(skb_headlen(skb)); - if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) { - tx_ring->tx_stats.tx_busy++; - return 0; - } - return count; -} - /** * i40e_xmit_frame_ring - Sends buffer on Tx ring * @skb: send buffer @@ -2135,13 +2089,23 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, __be16 protocol; u32 td_cmd = 0; u8 hdr_len = 0; - int tso; + int tso, count; /* prefetch the data, we'll need it later */ prefetch(skb->data); - if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) + count = i40e_xmit_descriptor_count(skb); + + /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, + * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, + * + 4 desc gap to avoid the cache line where head is, + * + 1 desc for context descriptor, + * otherwise try next time + */ + if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* prepare the xmit flags */ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 6ea8701cf066..1f719d0baf7a 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -326,6 +326,7 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring); int i40evf_napi_poll(struct napi_struct *napi, int budget); void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); +int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); /** * i40e_get_head - Retrieve head from head writeback @@ -340,4 +341,45 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } + +/** + * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed + * @skb: send buffer + * @tx_ring: ring to send buffer on + * + * Returns number of data descriptors needed for this skb. Returns 0 to indicate + * there is not enough descriptors available in this ring since we need at least + * one descriptor. 
+ **/ +static inline int i40e_xmit_descriptor_count(struct sk_buff *skb) +{ + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; + int count = 0, size = skb_headlen(skb); + + for (;;) { + count += TXD_USE_COUNT(size); + + if (!nr_frags--) + break; + + size = skb_frag_size(frag++); + } + + return count; +} + +/** + * i40e_maybe_stop_tx - 1st level check for Tx stop conditions + * @tx_ring: the ring to be checked + * @size: the size buffer we want to assure is available + * + * Returns 0 if stop is not needed + **/ +static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) +{ + if (likely(I40E_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __i40evf_maybe_stop_tx(tx_ring, size); +} #endif /* _I40E_TXRX_H_ */ -- cgit v1.2.3 From 2d37490b82afe1d1b745811e6ce0a4d16bc5e996 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 17 Feb 2016 11:02:50 -0800 Subject: i40e/i40evf: Rewrite logic for 8 descriptor per packet check This patch is meant to rewrite the logic for how we determine if we can transmit the frame or if it needs to be linearized. The previous code for this function was using a mix of division and modulus division as a part of computing if we need to take the slow path. Instead I have replaced this by simply working with a sliding window which will tell us if the frame would be capable of causing a single packet to span several descriptors. The logic for the scan is fairly simple. If any given group of 6 fragments is less than gso_size - 1 then it is possible for us to have one byte coming out of the first fragment, 6 fragments, and one or more bytes coming out of the last fragment. This gives us a total of 8 fragments which exceeds what we can allow so we send such frames to be linearized. Arguably the use of modulus might be more exact as the approach I propose may generate some false positives. However the likelihood of us taking much of a hit for those false positives is fairly low, and I would rather not add more overhead in the case where we are receiving a frame composed of 4K pages. 
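The worst case described above can be made concrete with a small, self-contained version of the sliding-window test. The helper below works on a plain array of fragment sizes with an assumed 8-descriptor limit; it is a sketch of the idea, not the driver's __i40e_chk_linearize(), and it deliberately errs toward linearizing.

	#include <stdbool.h>

	#define MAX_BUFFER_TXD	8                     /* assumed HW limit: data descriptors per segment */
	#define WINDOW		(MAX_BUFFER_TXD - 2)  /* segment header + a leading spill byte use 2    */

	/* Return true if any WINDOW consecutive fragments cover fewer than
	 * gso_size - 1 bytes: one byte could spill in from the fragment ahead,
	 * the WINDOW fragments follow, and at least one byte spills into the
	 * next fragment, giving more than MAX_BUFFER_TXD pieces in one segment.
	 */
	static bool needs_linearize(const int *frag_size, int nr_frags, int gso_size)
	{
		int sum = 0, i;

		if (nr_frags < MAX_BUFFER_TXD)
			return false;                 /* always fits */
		if (!gso_size)
			return true;                  /* not TSO: 8+ fragments cannot be split */

		for (i = 0; i < WINDOW; i++)          /* seed the first window */
			sum += frag_size[i];

		for (i = WINDOW; ; i++) {
			if (sum < gso_size - 1)
				return true;          /* window too small: segment could span 9+ descriptors */
			if (i == nr_frags)
				break;
			sum += frag_size[i] - frag_size[i - WINDOW];   /* slide the window forward */
		}
		return false;
	}

A caller that gets true back falls back to linearizing the skb and recounting, which is what the hunks below do with __skb_linearize() and TXD_USE_COUNT(skb->len).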
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 6 ++ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 105 +++++++++++++++----------- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 19 +++++ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 105 +++++++++++++++----------- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 19 +++++ 5 files changed, 162 insertions(+), 92 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 518d72ea1059..052df93f1da4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -1368,6 +1368,12 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, goto out_drop; count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = TXD_USE_COUNT(skb->len); + tx_ring->tx_stats.tx_linearize++; + } /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index f03657022b0f..5123646a895f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2593,59 +2593,71 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * __i40e_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer - * @tx_flags: collected send information * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +bool __i40e_chk_linearize(struct sk_buff *skb) { - struct skb_frag_struct *frag; - bool linearize = false; - unsigned int size = 0; - u16 num_frags; - u16 gso_segs; + const struct skb_frag_struct *frag, *stale; + int gso_size, nr_frags, sum; - num_frags = skb_shinfo(skb)->nr_frags; - gso_segs = skb_shinfo(skb)->gso_segs; + /* check to see if TSO is enabled, if so we may get a repreive */ + gso_size = skb_shinfo(skb)->gso_size; + if (unlikely(!gso_size)) + return true; - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 0; + /* no need to check if number of frags is less than 8 */ + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < I40E_MAX_BUFFER_TXD) + return false; - if (num_frags < (I40E_MAX_BUFFER_TXD)) - goto linearize_chk_done; - /* try the simple math, if we have too many frags per segment */ - if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > - I40E_MAX_BUFFER_TXD) { - linearize = true; - goto linearize_chk_done; - } - frag = &skb_shinfo(skb)->frags[0]; - /* we might still have more fragments per segment */ - do { - size += skb_frag_size(frag); - frag++; j++; - if ((size >= skb_shinfo(skb)->gso_size) && - (j < I40E_MAX_BUFFER_TXD)) { - size = (size % skb_shinfo(skb)->gso_size); - j = (size) ? 1 : 0; - } - if (j == I40E_MAX_BUFFER_TXD) { - linearize = true; - break; - } - num_frags--; - } while (num_frags); - } else { - if (num_frags >= I40E_MAX_BUFFER_TXD) - linearize = true; + /* We need to walk through the list and validate that each group + * of 6 fragments totals at least gso_size. 
However we don't need + * to perform such validation on the first or last 6 since the first + * 6 cannot inherit any data from a descriptor before them, and the + * last 6 cannot inherit any data from a descriptor after them. + */ + nr_frags -= I40E_MAX_BUFFER_TXD - 1; + frag = &skb_shinfo(skb)->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We + * use this as the worst case scenerio in which the frag ahead + * of us only provides one byte which is why we are limited to 6 + * descriptors for a single transmit as the header and previous + * fragment are already consuming 2 descriptors. + */ + sum = 1 - gso_size; + + /* Add size of frags 1 through 5 to create our initial sum */ + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. + */ + stale = &skb_shinfo(skb)->frags[0]; + for (;;) { + sum += skb_frag_size(++frag); + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + /* use pre-decrement to avoid processing last fragment */ + if (!--nr_frags) + break; + + sum -= skb_frag_size(++stale); } -linearize_chk_done: - return linearize; + return false; } /** @@ -2876,6 +2888,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, prefetch(skb->data); count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = TXD_USE_COUNT(skb->len); + tx_ring->tx_stats.tx_linearize++; + } /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, @@ -2916,11 +2934,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - if (i40e_chk_linearize(skb, tx_flags)) { - if (skb_linearize(skb)) - goto out_drop; - tx_ring->tx_stats.tx_linearize++; - } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 5dbc958293f7..56009709528a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -337,6 +337,7 @@ int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); +bool __i40e_chk_linearize(struct sk_buff *skb); /** * i40e_get_head - Retrieve head from head writeback @@ -392,4 +393,22 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) return 0; return __i40e_maybe_stop_tx(tx_ring, size); } + +/** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @count: number of buffers used + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
+ **/ +static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) +{ + /* we can only support up to 8 data buffers for a single send */ + if (likely(count <= I40E_MAX_BUFFER_TXD)) + return false; + + return __i40e_chk_linearize(skb); +} #endif /* _I40E_TXRX_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 9f38127bdc11..2369db58cdb1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1796,59 +1796,71 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, } /** - * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet * @skb: send buffer - * @tx_flags: collected send information * * Note: Our HW can't scatter-gather more than 8 fragments to build * a packet on the wire and so we need to figure out the cases where we * need to linearize the skb. **/ -static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) +bool __i40evf_chk_linearize(struct sk_buff *skb) { - struct skb_frag_struct *frag; - bool linearize = false; - unsigned int size = 0; - u16 num_frags; - u16 gso_segs; + const struct skb_frag_struct *frag, *stale; + int gso_size, nr_frags, sum; - num_frags = skb_shinfo(skb)->nr_frags; - gso_segs = skb_shinfo(skb)->gso_segs; + /* check to see if TSO is enabled, if so we may get a repreive */ + gso_size = skb_shinfo(skb)->gso_size; + if (unlikely(!gso_size)) + return true; - if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { - u16 j = 0; + /* no need to check if number of frags is less than 8 */ + nr_frags = skb_shinfo(skb)->nr_frags; + if (nr_frags < I40E_MAX_BUFFER_TXD) + return false; - if (num_frags < (I40E_MAX_BUFFER_TXD)) - goto linearize_chk_done; - /* try the simple math, if we have too many frags per segment */ - if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > - I40E_MAX_BUFFER_TXD) { - linearize = true; - goto linearize_chk_done; - } - frag = &skb_shinfo(skb)->frags[0]; - /* we might still have more fragments per segment */ - do { - size += skb_frag_size(frag); - frag++; j++; - if ((size >= skb_shinfo(skb)->gso_size) && - (j < I40E_MAX_BUFFER_TXD)) { - size = (size % skb_shinfo(skb)->gso_size); - j = (size) ? 1 : 0; - } - if (j == I40E_MAX_BUFFER_TXD) { - linearize = true; - break; - } - num_frags--; - } while (num_frags); - } else { - if (num_frags >= I40E_MAX_BUFFER_TXD) - linearize = true; + /* We need to walk through the list and validate that each group + * of 6 fragments totals at least gso_size. However we don't need + * to perform such validation on the first or last 6 since the first + * 6 cannot inherit any data from a descriptor before them, and the + * last 6 cannot inherit any data from a descriptor after them. + */ + nr_frags -= I40E_MAX_BUFFER_TXD - 1; + frag = &skb_shinfo(skb)->frags[0]; + + /* Initialize size to the negative value of gso_size minus 1. We + * use this as the worst case scenerio in which the frag ahead + * of us only provides one byte which is why we are limited to 6 + * descriptors for a single transmit as the header and previous + * fragment are already consuming 2 descriptors. 
+ */ + sum = 1 - gso_size; + + /* Add size of frags 1 through 5 to create our initial sum */ + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + sum += skb_frag_size(++frag); + + /* Walk through fragments adding latest fragment, testing it, and + * then removing stale fragments from the sum. + */ + stale = &skb_shinfo(skb)->frags[0]; + for (;;) { + sum += skb_frag_size(++frag); + + /* if sum is negative we failed to make sufficient progress */ + if (sum < 0) + return true; + + /* use pre-decrement to avoid processing last fragment */ + if (!--nr_frags) + break; + + sum -= skb_frag_size(++stale); } -linearize_chk_done: - return linearize; + return false; } /** @@ -2095,6 +2107,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, prefetch(skb->data); count = i40e_xmit_descriptor_count(skb); + if (i40e_chk_linearize(skb, count)) { + if (__skb_linearize(skb)) + goto out_drop; + count = TXD_USE_COUNT(skb->len); + tx_ring->tx_stats.tx_linearize++; + } /* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD, * + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD, @@ -2130,11 +2148,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - if (i40e_chk_linearize(skb, tx_flags)) { - if (skb_linearize(skb)) - goto out_drop; - tx_ring->tx_stats.tx_linearize++; - } skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 1f719d0baf7a..c1dd8c5c9666 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -327,6 +327,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget); void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size); +bool __i40evf_chk_linearize(struct sk_buff *skb); /** * i40e_get_head - Retrieve head from head writeback @@ -382,4 +383,22 @@ static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) return 0; return __i40evf_maybe_stop_tx(tx_ring, size); } + +/** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @count: number of buffers used + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. + **/ +static inline bool i40e_chk_linearize(struct sk_buff *skb, int count) +{ + /* we can only support up to 8 data buffers for a single send */ + if (likely(count <= I40E_MAX_BUFFER_TXD)) + return false; + + return __i40evf_chk_linearize(skb); +} #endif /* _I40E_TXRX_H_ */ -- cgit v1.2.3 From 3bc67973e81d5104908a4ba7c2aab98a4f7bd64e Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 17 Feb 2016 11:02:56 -0800 Subject: i40e/i40evf: Move Tx checksum closer to TSO On all of the other Intel drivers we place checksum close to TSO as they have a significant amount in common and it can help to reduce the decision tree for how to handle the frame as the first check in TSO is to see if checksumming is offloaded, and if it is not we can skip _BOTH_ TSO and Tx checksum offload based on a single check. 
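Sketched with illustrative names (setup_tso() is a stand-in, not the driver's i40e_tso()), the rationale is that the very first thing TSO setup tests is whether checksum offload was requested at all, so with the two calls adjacent a single early return covers both offload paths:

	static int setup_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_tso_mss)
	{
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return 0;             /* no csum offload requested: no TSO either */

		if (!skb_is_gso(skb))
			return 0;             /* checksum only; handled right after this  */

		/* ... parse headers and fill in the TSO context descriptor ... */
		return 1;
	}

Calling the checksum helper immediately afterwards means the same header parsing and the same "is anything offloaded?" decision serve both setup routines.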
Signed-off-by: Alexander Duyck Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 12 ++++++------ drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 5123646a895f..cb52f39d514a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2929,6 +2929,12 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; + /* Always offload the checksum, since it's in the data descriptor */ + tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, + tx_ring, &cd_tunneling); + if (tso < 0) + goto out_drop; + tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); if (tsyn) @@ -2939,12 +2945,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; - /* Always offload the checksum, since it's in the data descriptor */ - tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, - tx_ring, &cd_tunneling); - if (tso < 0) - goto out_drop; - i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 2369db58cdb1..ebcc25c05796 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -2148,17 +2148,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; - skb_tx_timestamp(skb); - - /* always enable CRC insertion offload */ - td_cmd |= I40E_TX_DESC_CMD_ICRC; - /* Always offload the checksum, since it's in the data descriptor */ tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tx_ring, &cd_tunneling); if (tso < 0) goto out_drop; + skb_tx_timestamp(skb); + + /* always enable CRC insertion offload */ + td_cmd |= I40E_TX_DESC_CMD_ICRC; + i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, cd_tunneling, cd_l2tag2); -- cgit v1.2.3 From fd077cd3399b164548f538625f07f3e9f1d7ef00 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 17 Feb 2016 16:12:11 -0800 Subject: i40e: Add functions to blink led on 10GBaseT PHY This patch adds functions to blink led on devices using 10GBaseT PHY since MAC registers used in other designs do not work in this device configuration. 
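As a rough illustration of the blink loop these helpers implement, here is a stand-alone model with no real MDIO access: PHY_LED_MANUAL_ON and the register variable merely stand in for the driver's I40E_PHY_LED_MANUAL_ON bit and the PHY LED provisioning register.

#include <stdio.h>

#define PHY_LED_MANUAL_ON	0x100	/* models the I40E_PHY_LED_MANUAL_ON bit */

/* Model of the loop inside i40e_blink_phy_link_led(): every "interval"
 * milliseconds the LED provisioning register is read and the manual-on
 * bit is toggled; when "time" seconds have passed the saved value is
 * written back. */
static void blink_model(unsigned int *reg, unsigned int time_s,
			unsigned int interval_ms)
{
	unsigned int saved = *reg;
	unsigned int i;

	for (i = 0; i < time_s * 1000; i += interval_ms) {
		*reg = (*reg & PHY_LED_MANUAL_ON) ? 0 : PHY_LED_MANUAL_ON;
		printf("t=%5ums LED %s\n", i,
		       (*reg & PHY_LED_MANUAL_ON) ? "on" : "off");
		/* the driver sleeps with msleep(interval) at this point */
	}
	*reg = saved;	/* restore_config: put the original LED mode back */
}

int main(void)
{
	unsigned int led_reg = 0;

	blink_model(&led_reg, 2, 500);
	return 0;
}
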
Change-ID: Id4b88c93c649fd2b88073a00b42867a77c761ca3 Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 329 +++++++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_prototype.h | 13 + drivers/net/ethernet/intel/i40e/i40e_type.h | 16 + drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 7 + 4 files changed, 365 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index f9239330f49d..447729f438ea 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -4214,3 +4214,332 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, return status; } + +/** + * i40e_read_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_adr: PHY address on MDIO interface + * @value: PHY register value + * + * Reads specified PHY register value + **/ +i40e_status i40e_read_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, + u16 *value) +{ + i40e_status status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_ADDRESS) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_read_end; + } + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_READ) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + + if (!status) { + command = rd32(hw, I40E_GLGEN_MSRWD(port_num)); + *value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >> + I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT; + } else { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't read register value from external PHY.\n"); + } + +phy_read_end: + return status; +} + +/** + * i40e_write_phy_register + * @hw: pointer to the HW structure + * @page: registers page number + * @reg: register address in the page + * @phy_adr: PHY address on MDIO interface + * @value: PHY register value + * + * Writes value to specified PHY register + **/ +i40e_status i40e_write_phy_register(struct i40e_hw *hw, + u8 page, u16 reg, u8 phy_addr, + u16 value) +{ + i40e_status status = I40E_ERR_TIMEOUT; + u32 command = 0; + u16 retry = 1000; + u8 port_num = hw->func_caps.mdio_port_num; + + command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) | + (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_ADDRESS) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + 
do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + if (status) { + i40e_debug(hw, I40E_DEBUG_PHY, + "PHY: Can't write command to external PHY.\n"); + goto phy_write_end; + } + + command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT; + wr32(hw, I40E_GLGEN_MSRWD(port_num), command); + + command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) | + (phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) | + (I40E_MDIO_OPCODE_WRITE) | + (I40E_MDIO_STCODE) | + (I40E_GLGEN_MSCA_MDICMD_MASK) | + (I40E_GLGEN_MSCA_MDIINPROGEN_MASK); + status = I40E_ERR_TIMEOUT; + retry = 1000; + wr32(hw, I40E_GLGEN_MSCA(port_num), command); + do { + command = rd32(hw, I40E_GLGEN_MSCA(port_num)); + if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) { + status = 0; + break; + } + usleep_range(10, 20); + retry--; + } while (retry); + +phy_write_end: + return status; +} + +/** + * i40e_get_phy_address + * @hw: pointer to the HW structure + * @dev_num: PHY port num that address we want + * @phy_addr: Returned PHY address + * + * Gets PHY address for current port + **/ +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num) +{ + u8 port_num = hw->func_caps.mdio_port_num; + u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num)); + + return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f; +} + +/** + * i40e_blink_phy_led + * @hw: pointer to the HW structure + * @time: time how long led will blinks in secs + * @interval: gap between LED on and off in msecs + * + * Blinks PHY link LED + **/ +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval) +{ + i40e_status status = 0; + u32 i; + u16 led_ctl; + u16 gpio_led_port; + u16 led_reg; + u16 led_addr = I40E_PHY_LED_PROV_REG_1; + u8 phy_addr = 0; + u8 port_num; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + led_addr++) { + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto phy_blinking_end; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto phy_blinking_end; + break; + } + } + + if (time > 0 && interval > 0) { + for (i = 0; i < time * 1000; i += interval) { + status = i40e_read_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + &led_reg); + if (status) + goto restore_config; + if (led_reg & I40E_PHY_LED_MANUAL_ON) + led_reg = 0; + else + led_reg = I40E_PHY_LED_MANUAL_ON; + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, + led_reg); + if (status) + goto restore_config; + msleep(interval); + } + } + +restore_config: + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, led_ctl); + +phy_blinking_end: + return status; +} + +/** + * i40e_led_get_phy - return current on/off mode + * @hw: pointer to the hw struct + * @led_addr: address of led register to use + * @val: original value of register to use + * + **/ +i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val) +{ + i40e_status status = 0; + u16 gpio_led_port; + u8 phy_addr = 0; + u16 reg_val; + u16 temp_addr; + u8 port_num; + u32 i; + + temp_addr = I40E_PHY_LED_PROV_REG_1; + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & 
I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++, + temp_addr++) { + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + temp_addr, phy_addr, ®_val); + if (status) + return status; + *val = reg_val; + if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) { + *led_addr = temp_addr; + break; + } + } + return status; +} + +/** + * i40e_led_set_phy + * @hw: pointer to the HW structure + * @on: true or false + * @mode: original val plus bit for set or ignore + * Set led's on or off when controlled by the PHY + * + **/ +i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode) +{ + i40e_status status = 0; + u16 led_ctl = 0; + u16 led_reg = 0; + u8 phy_addr = 0; + u8 port_num; + u32 i; + + i = rd32(hw, I40E_PFGEN_PORTNUM); + port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK); + phy_addr = i40e_get_phy_address(hw, port_num); + + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, &led_reg); + if (status) + return status; + led_ctl = led_reg; + if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) { + led_reg = 0; + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + return status; + } + status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, &led_reg); + if (status) + goto restore_config; + if (on) + led_reg = I40E_PHY_LED_MANUAL_ON; + else + led_reg = 0; + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_reg); + if (status) + goto restore_config; + if (mode & I40E_PHY_LED_MODE_ORIG) { + led_ctl = (mode & I40E_PHY_LED_MODE_MASK); + status = i40e_write_phy_register(hw, + I40E_PHY_COM_REG_PAGE, + led_addr, phy_addr, led_ctl); + } + return status; +restore_config: + status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr, + phy_addr, led_ctl); + return status; +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index e8deabde82b4..ca2f7acd9f8b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -74,6 +74,12 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, u32 i40e_led_get(struct i40e_hw *hw); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); +i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on, + u16 led_addr, u32 mode); +i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr, + u16 *val); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); /* admin send queue commands */ @@ -336,4 +342,11 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); +i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 value); +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index b59a021b7a69..0a0baf71041b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -90,6 +90,22 @@ enum i40e_debug_mask { 
I40E_DEBUG_ALL = 0xFFFFFFFF }; +#define I40E_MDIO_STCODE 0 +#define I40E_MDIO_OPCODE_ADDRESS 0 +#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) +#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \ + I40E_GLGEN_MSCA_OPCODE_SHIFT) + +#define I40E_PHY_COM_REG_PAGE 0x1E +#define I40E_PHY_LED_LINK_MODE_MASK 0xF0 +#define I40E_PHY_LED_MANUAL_ON 0x100 +#define I40E_PHY_LED_PROV_REG_1 0xC430 +#define I40E_PHY_LED_MODE_MASK 0xFFFF +#define I40E_PHY_LED_MODE_ORIG 0x80000000 + /* These are structs for managing the hardware information and the operations. * The structures of function pointers are filled out at init time when we * know for sure exactly which hardware we're working with. This gives us the diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index cbd9a1b078ab..fa34d859e015 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -103,4 +103,11 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); +i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 *value); +i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, + u16 reg, u8 phy_addr, u16 value); +u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); +i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, + u32 time, u32 interval); #endif /* _I40E_PROTOTYPE_H_ */ -- cgit v1.2.3 From 31b606d0c40a1435c54bff18e4d3d3c33af1c3cf Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 17 Feb 2016 16:12:12 -0800 Subject: i40e: Fix led blink capability for 10GBaseT PHY This patch fixes a problem where the ethtool identify adapter functionality did not work for some copper PHY's. Without this patch, the blink led functionality fails on some parts. This patch adds PHY write code to blink led's on parts where this functionality is contained in the PHY rather than the MAC. 
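The shape of the resulting ethtool identify handler can be sketched as follows. This is illustrative only: have_phy_led stands in for the I40E_FLAG_HAVE_10GBASET_PHY test, and the printf calls stand in for the MAC (i40e_led_*) and PHY (i40e_led_*_phy) helpers.

#include <stdbool.h>
#include <stdio.h>

/* The four phases ethtool drives when "ethtool -p" identifies a port. */
enum phys_id_state { ID_ACTIVE, ID_ON, ID_OFF, ID_INACTIVE };

static unsigned int saved_mode;

/* Skeleton of the handler after this patch: save the LED state when the
 * blink starts, drive the LED on/off, restore the state when it stops,
 * picking the MAC or PHY path once per call. */
static int set_phys_id_model(enum phys_id_state state, bool have_phy_led)
{
	const char *path = have_phy_led ? "PHY over MDIO" : "MAC register";

	switch (state) {
	case ID_ACTIVE:
		saved_mode = 0x5;	/* pretend we read the current mode */
		printf("save LED mode via %s\n", path);
		return 2;		/* blink frequency, as the driver returns */
	case ID_ON:
		printf("LED on via %s\n", path);
		break;
	case ID_OFF:
		printf("LED off via %s\n", path);
		break;
	case ID_INACTIVE:
		printf("restore mode %#x via %s\n", saved_mode, path);
		break;
	}
	return 0;
}

int main(void)
{
	set_phys_id_model(ID_ACTIVE, true);
	set_phys_id_model(ID_ON, true);
	set_phys_id_model(ID_OFF, true);
	set_phys_id_model(ID_INACTIVE, true);
	return 0;
}
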
Change-ID: Iee7b3453f61d5ffd0b3d03f720ee4f17f919fcc2 Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e.h | 3 +++ drivers/net/ethernet/intel/i40e/i40e_common.c | 26 +++++++++++++++++++ drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 36 +++++++++++++++++++++----- drivers/net/ethernet/intel/i40e/i40e_main.c | 4 +++ 4 files changed, 63 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e0758ddd2e22..e99be9f696c3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -111,6 +111,7 @@ #define I40E_OEM_VER_PATCH_MASK 0xff #define I40E_OEM_VER_BUILD_SHIFT 8 #define I40E_OEM_VER_SHIFT 24 +#define I40E_PHY_DEBUG_PORT BIT(4) /* The values in here are decimal coded as hex as is the case in the NVM map*/ #define I40E_CURRENT_NVM_VERSION_HI 0x2 @@ -355,6 +356,7 @@ struct i40e_pf { #define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) #define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) #define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) +#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48) #define I40E_FLAG_PF_MAC BIT_ULL(50) /* tracks features that get auto disabled by errors */ @@ -440,6 +442,7 @@ struct i40e_pf { u32 ioremap_len; u32 fd_inv; + u16 phy_led_val; }; struct i40e_mac_filter { diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 447729f438ea..d41719331976 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1887,6 +1887,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, return status; } +/** + * i40e_aq_set_phy_debug + * @hw: pointer to the hw struct + * @cmd_flags: debug command flags + * @cmd_details: pointer to command details structure or NULL + * + * Reset the external PHY. 
+ **/ +enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_set_phy_debug *cmd = + (struct i40e_aqc_set_phy_debug *)&desc.params.raw; + enum i40e_status_code status; + + i40e_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_set_phy_debug); + + cmd->command_flags = cmd_flags; + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + /** * i40e_aq_add_vsi * @hw: pointer to the hw struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9fc7546bfc9b..f1ad10161792 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1826,28 +1826,52 @@ static int i40e_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct i40e_netdev_priv *np = netdev_priv(netdev); + i40e_status ret = 0; struct i40e_pf *pf = np->vsi->back; struct i40e_hw *hw = &pf->hw; int blink_freq = 2; + u16 temp_status; switch (state) { case ETHTOOL_ID_ACTIVE: - pf->led_status = i40e_led_get(hw); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) { + pf->led_status = i40e_led_get(hw); + } else { + i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL); + ret = i40e_led_get_phy(hw, &temp_status, + &pf->phy_led_val); + pf->led_status = temp_status; + } return blink_freq; case ETHTOOL_ID_ON: - i40e_led_set(hw, 0xF, false); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) + i40e_led_set(hw, 0xf, false); + else + ret = i40e_led_set_phy(hw, true, pf->led_status, 0); break; case ETHTOOL_ID_OFF: - i40e_led_set(hw, 0x0, false); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) + i40e_led_set(hw, 0x0, false); + else + ret = i40e_led_set_phy(hw, false, pf->led_status, 0); break; case ETHTOOL_ID_INACTIVE: - i40e_led_set(hw, pf->led_status, false); + if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) { + i40e_led_set(hw, false, pf->led_status); + } else { + ret = i40e_led_set_phy(hw, false, pf->led_status, + (pf->phy_led_val | + I40E_PHY_LED_MODE_ORIG)); + i40e_aq_set_phy_debug(hw, 0, NULL); + } break; default: break; } - - return 0; + if (ret) + return -ENOENT; + else + return 0; } /* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2f2b2d714f63..b29b13154d9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11130,6 +11130,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, pf->main_vsi_seid); + if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) || + (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4)) + pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY; + /* print a string summarizing features */ i40e_print_features(pf); -- cgit v1.2.3 From 4d7cec078de864b7ba5459aa688278c4e6f3ad42 Mon Sep 17 00:00:00 2001 From: Kevin Scott Date: Wed, 17 Feb 2016 16:12:13 -0800 Subject: i40e: Increase timeout when checking GLGEN_RSTAT_DEVSTATE bit When linking with particular PHY types (ex: copper PHY), the amount of time it takes for the GLGEN_RSTAT_DEVSTATE to be set increases greatly, which can lead to a timeout and failure to load the driver. 
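A minimal model of the widened poll loop, under the assumption stated in the patch that steady state can take up to about 15 seconds: device_still_in_reset() is only a stand-in for reading I40E_GLGEN_RSTAT, and the sleep between polls is noted in a comment rather than performed.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for reading I40E_GLGEN_RSTAT: here the device "leaves reset"
 * after 42 polls, just so the example terminates. */
static bool device_still_in_reset(void)
{
	static int polls;

	return ++polls < 42;
}

/* Model of the widened wait: the per-iteration delay is unchanged, but
 * the iteration budget read from GLGEN_RSTCTL is scaled by 20 so the
 * total wait covers the longer time some PHY types need. */
static int wait_for_reset(unsigned int grst_del)
{
	unsigned int cnt;

	grst_del *= 20;		/* bump to ~16 s max, to be safe */
	for (cnt = 0; cnt < grst_del; cnt++) {
		if (!device_still_in_reset())
			return 0;
		/* the driver sleeps briefly here before polling again */
	}
	return -1;		/* timed out */
}

int main(void)
{
	printf("reset wait: %s\n", wait_for_reset(10) ? "timed out" : "done");
	return 0;
}
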
Change-ID: If02be0dfcd7c57fdde2d5c81cd63651260cd2029 Signed-off-by: Kevin Scott Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index d41719331976..3a57e59c2a1a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1239,7 +1239,13 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - for (cnt = 0; cnt < grst_del + 10; cnt++) { + + /* It can take upto 15 secs for GRST steady state. + * Bump it to 16 secs max to be safe. + */ + grst_del = grst_del * 20; + + for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) break; -- cgit v1.2.3 From 3fe06f415b31ad06d2c2923216292057e899eb0d Mon Sep 17 00:00:00 2001 From: Neerav Parikh Date: Wed, 17 Feb 2016 16:12:15 -0800 Subject: i40e: Do not wait for Rx queue disable in DCB reconfig Just like Tx queues don't wait for Rx queues to be disabled before DCB has been reconfigured. Check the queues are disabled only after the DCB configuration has been applied to the VSI(s) managed by the PF driver. In case of any timeout issue a PF reset to recover. Change-ID: Ic51e94c25baf9a5480cee983f35d15575a88642c Signed-off-by: Neerav Parikh Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 33 +++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index b29b13154d9f..53660f1bbc3e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3929,6 +3929,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) else rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); + /* No waiting for the Tx queue to disable */ + if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) + continue; /* wait for the change to finish */ ret = i40e_pf_rxq_wait(pf, pf_q, enable); @@ -4287,12 +4290,12 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) #ifdef CONFIG_I40E_DCB /** - * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled + * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled * @vsi: the VSI being configured * - * This function waits for the given VSI's Tx queues to be disabled. + * This function waits for the given VSI's queues to be disabled. 
**/ -static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) +static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; int i, pf_q, ret; @@ -4309,24 +4312,36 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) } } + pf_q = vsi->base_queue; + for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) { + /* Check and wait for the disable status of the queue */ + ret = i40e_pf_rxq_wait(pf, pf_q, false); + if (ret) { + dev_info(&pf->pdev->dev, + "VSI seid %d Rx ring %d disable timeout\n", + vsi->seid, pf_q); + return ret; + } + } + return 0; } /** - * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled + * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled * @pf: the PF * - * This function waits for the Tx queues to be in disabled state for all the + * This function waits for the queues to be in disabled state for all the * VSIs that are managed by this PF. **/ -static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) +static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) { int v, ret = 0; for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { /* No need to wait for FCoE VSI queues */ if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { - ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); + ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); if (ret) break; } @@ -5726,8 +5741,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, if (ret) goto exit; - /* Wait for the PF's Tx queues to be disabled */ - ret = i40e_pf_wait_txq_disabled(pf); + /* Wait for the PF's queues to be disabled */ + ret = i40e_pf_wait_queues_disabled(pf); if (ret) { /* Schedule PF reset to recover */ set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); -- cgit v1.2.3 From 730a8f8777e55912f445c2c29234d51cceb1dfc2 Mon Sep 17 00:00:00 2001 From: Carolyn Wyborny Date: Wed, 17 Feb 2016 16:12:16 -0800 Subject: i40e: Fix for unexpected messaging This fixes an issue where a previously removed message has returned. Changing the message type to dev_dbg leaves the info, if desired, but takes it out of normal everyday usage. Also changed call to only provide port data when its valid and not when its not (delete case). Change-ID: Ief6f33b915f6364c24fa8e5789c2fc3168b5e2ed Signed-off-by: Carolyn Wyborny Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 53660f1bbc3e..05def9f67ef3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7109,12 +7109,13 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ret = i40e_aq_del_udp_tunnel(hw, i, NULL); if (ret) { - dev_info(&pf->pdev->dev, - "%s vxlan port %d, index %d failed, err %s aq_err %s\n", - port ? "add" : "delete", - ntohs(port), i, - i40e_stat_str(&pf->hw, ret), - i40e_aq_str(&pf->hw, + dev_dbg(&pf->pdev->dev, + "%s %s port %d, index %d failed, err %s aq_err %s\n", + pf->udp_ports[i].type ? "vxlan" : "geneve", + port ? 
"add" : "delete", + ntohs(port), i, + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); pf->udp_ports[i].index = 0; } -- cgit v1.2.3 From fe726082728da9f653d4e747baf0816d826fc626 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Jain Date: Wed, 17 Feb 2016 16:12:17 -0800 Subject: i40e: Expose some registers to program parser, FD and RSS logic This patch adds 7 new register definitions for programming the parser, flow director and RSS blocks in the HW. Change-ID: I31e76673125275f3c69a14c646361919d04dc987 Signed-off-by: Anjali Singhai Jain Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_register.h | 48 +++++++++++++++++++++++++ 1 file changed, 48 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index dc0402fe3370..86ca27f72f02 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -2045,6 +2045,14 @@ #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 #define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) +#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_GL_PRS_FVBM_MAX_INDEX 3 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0 +#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8 +#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT) +#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31 +#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT) #define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 #define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) @@ -2216,6 +2224,14 @@ #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) +#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ +#define I40E_PRTQF_FD_INSET_MAX_INDEX 63 +#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0 +#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT) #define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_MSK_MAX_INDEX 63 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 @@ -5155,6 +5171,38 @@ #define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) +#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_FD_MSK_MAX_INDEX 1 +#define I40E_GLQF_FD_MSK_MASK_SHIFT 0 +#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT) +#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, 
I40E_GLQF_FD_MSK_OFFSET_SHIFT) +#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_INSET_MAX_INDEX 1 +#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0 +#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT) +#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_HASH_MSK_MAX_INDEX 1 +#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0 +#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT) +#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16 +#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT) +#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */ +#define I40E_GLQF_ORT_MAX_INDEX 63 +#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0 +#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT) +#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5 +#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT) +#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7 +#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) +#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */ +#define I40E_GLQF_PIT_MAX_INDEX 23 +#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0 +#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT) +#define I40E_GLQF_PIT_FSIZE_SHIFT 5 +#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT) +#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10 +#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT) #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ #define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 #define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 -- cgit v1.2.3 From f534039dd8ab39cb3259e5860d2be3b0e70aacbf Mon Sep 17 00:00:00 2001 From: John Underwood Date: Thu, 18 Feb 2016 09:19:24 -0800 Subject: i40e: add check for null VSI Return from i40e_vsi_reinit_setup() if vsi param is NULL. This makes this code consistent with all the other code that checks for NULL before using one of the VSI pointers accessed with an indexed variable. (Indexed VSI pointers are intentionally set to NULL in i40e_vsi_clear() and i40e_remove(). 
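The pattern applied by the null-VSI patch above is simply a guard before the first dereference. A compile-only sketch, with an opaque struct standing in for struct i40e_vsi:

#include <stddef.h>

/* Indexed VSI slots may legitimately hold NULL (they are cleared in
 * i40e_vsi_clear() and i40e_remove()), so check before using the pointer.
 * "struct vsi" is opaque here and only stands in for struct i40e_vsi. */
struct vsi;

static struct vsi *reinit_setup_model(struct vsi *vsi)
{
	if (!vsi)		/* the caller may hand us a cleared slot */
		return NULL;

	/* ... only past this point is it safe to follow vsi->back etc. ... */
	return vsi;
}
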
Change-ID: I3bc8b909c70fd2439334eeae994d151f61480985 Signed-off-by: John Underwood Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 05def9f67ef3..3ff3e83ffd92 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9583,10 +9583,15 @@ vector_setup_out: **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { - struct i40e_pf *pf = vsi->back; + struct i40e_pf *pf; u8 enabled_tc; int ret; + if (!vsi) + return NULL; + + pf = vsi->back; + i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_clear_rings(vsi); -- cgit v1.2.3 From 3336514381f9ef99c50e5337ae1bf36f8138679d Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 17 Feb 2016 16:12:19 -0800 Subject: i40e: add adminq commands for Rx CTL registers Add the new opcodes and struct used for asking the firmware to update Rx control registers that need extra care when being accessed while under heavy traffic - e.g. sustained 64byte packets at line rate on all ports. The firmware will take extra steps to be sure the register accesses are successful. The registers involved are: PFQF_CTL_0 PFQF_HENA PFQF_FDALLOC PFQF_HREGION PFLAN_QALLOC VPQF_CTL VFQF_HENA VFQF_HREGION VSIQF_CTL VSILAN_QBASE VSILAN_QTABLE VSIQF_TCREGION PFQF_HKEY VFQF_HKEY PRTQF_CTL_0 GLFCOE_RCTL GLFCOE_RSOF GLQF_CTL GLQF_SWAP GLQF_HASH_MSK GLQF_HASH_INSET GLQF_HSYM GLQF_FC_MSK GLQF_FC_INSET GLQF_FD_MSK PRTQF_FD_INSET PRTQF_FD_FLXINSET PRTQF_FD_MSK Change-ID: I56c8144000da66ad99f68948d8a184b2ec2aeb3e Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 16 ++++++++++++++++ drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index bb7ecbbdf948..8d5c65ab6267 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -146,6 +146,8 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -696,6 +698,20 @@ struct i40e_aqc_set_switch_config { I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h index 815e481ccd9c..aad8d6277110 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ 
b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h @@ -146,6 +146,8 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_get_switch_resource_alloc = 0x0204, i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_update_vsi_parameters = 0x0211, @@ -693,6 +695,20 @@ struct i40e_aqc_set_switch_config { I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); +/* Read Receive control registers (direct 0x0206) + * Write Receive control registers (direct 0x0207) + * used for accessing Rx control registers that can be + * slow and need special handling when under high Rx load + */ +struct i40e_aqc_rx_ctl_reg_read_write { + __le32 reserved1; + __le32 address; + __le32 reserved2; + __le32 value; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); + /* Add VSI (indirect 0x0210) * this indirect command uses struct i40e_aqc_vsi_properties_data * as the indirect buffer (128 bytes) -- cgit v1.2.3 From f658137cbb1fddbe40ec7f1a2cebaf9dc9484ea7 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 17 Feb 2016 16:12:20 -0800 Subject: i40e: implement and use Rx CTL helper functions Use the new AdminQ functions for safely accessing the Rx control registers that may be affected by heavy small packet traffic. Change-ID: Ibb00983e8dcba71f4b760222a609a5fcaa726f18 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 128 ++++++++++++++++++++- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 8 ++ drivers/net/ethernet/intel/i40evf/i40e_common.c | 125 ++++++++++++++++++++ drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 8 ++ 4 files changed, 266 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 3a57e59c2a1a..74d95999f393 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1328,7 +1328,7 @@ void i40e_clear_hw(struct i40e_hw *hw) num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; - val = rd32(hw, I40E_PFLAN_QALLOC); + val = i40e_read_rx_ctl(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> @@ -3882,7 +3882,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, return ret; /* Read the PF Queue Filter control register */ - val = rd32(hw, I40E_PFQF_CTL_0); + val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); /* Program required PE hash buckets for the PF */ val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; @@ -3919,7 +3919,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, if (settings->enable_macvlan) val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; - wr32(hw, I40E_PFQF_CTL_0, val); + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val); return 0; } @@ -4575,3 +4575,125 @@ restore_config: phy_addr, led_ctl); return status; } + +/** + * i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + 
struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val) + return I40E_ERR_PARAM; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == 0) + *reg_val = le32_to_cpu(cmd_resp->value); + + return status; +} + +/** + * i40e_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40e_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value = cpu_to_le32(reg_val); + + status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40e_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40e_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + wr32(hw, reg_addr, reg_val); +} diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index ca2f7acd9f8b..d51eee5bf79a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -342,6 +342,14 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); +i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +i40e_status 
i40e_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c index 938783e0baac..771ac6ad8cda 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c @@ -903,6 +903,131 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { I40E_PTT_UNUSED_ENTRY(255) }; +/** + * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: ptr to register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to read the Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + if (!reg_val) + return I40E_ERR_PARAM; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_rx_ctl_reg_read); + + cmd_resp->address = cpu_to_le32(reg_addr); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + if (status == 0) + *reg_val = le32_to_cpu(cmd_resp->value); + + return status; +} + +/** + * i40evf_read_rx_ctl - read from an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + **/ +u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + u32 val = 0; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, + &val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + val = rd32(hw, reg_addr); + + return val; +} + +/** + * i40evf_aq_rx_ctl_write_register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + * @cmd_details: pointer to command details structure or NULL + * + * Use the firmware to write to an Rx control register, + * especially useful if the Rx unit is under heavy pressure + **/ +i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details) +{ + struct i40e_aq_desc desc; + struct i40e_aqc_rx_ctl_reg_read_write *cmd = + (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; + i40e_status status; + + i40evf_fill_default_direct_cmd_desc(&desc, + i40e_aqc_opc_rx_ctl_reg_write); + + cmd->address = cpu_to_le32(reg_addr); + cmd->value = cpu_to_le32(reg_val); + + status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); + + return status; +} + +/** + * i40evf_write_rx_ctl - write to an Rx control register + * @hw: pointer to the hw struct + * @reg_addr: register address + * @reg_val: register value + **/ +void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 
reg_addr, u32 reg_val) +{ + i40e_status status = 0; + bool use_register; + int retry = 5; + + use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5); + if (!use_register) { +do_retry: + status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, + reg_val, NULL); + if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { + usleep_range(1000, 2000); + retry--; + goto do_retry; + } + } + + /* if the AQ access failed, try the old-fashioned way */ + if (status || use_register) + wr32(hw, reg_addr, reg_val); +} + /** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h index fa34d859e015..d89d52109efa 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h @@ -103,6 +103,14 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, u16 vsi_seid); +i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, + u32 reg_addr, u32 *reg_val, + struct i40e_asq_cmd_details *cmd_details); +u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); +i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, + u32 reg_addr, u32 reg_val, + struct i40e_asq_cmd_details *cmd_details); +void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, u16 reg, u8 phy_addr, u16 *value); i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, -- cgit v1.2.3 From 272cdaf2472ab7713deebe060bb90319b0382a94 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Wed, 17 Feb 2016 16:12:21 -0800 Subject: i40e: Use the new rx ctl register helpers. Don't use AQ calls from clear_hw. Use the new AdminQ functions for safely accessing the Rx control registers that may be affected by heavy small packet traffic. We can't use AdminQ calls in i40e_clear_hw() because the HW is being initialized and the AdminQ is not alive. We recently added an AQ related replacement for reading PFLAN_QALLOC, and this patch puts back the original register read. 
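The helper pattern used across these two Rx control register patches (try the firmware AdminQ call, retry briefly on EAGAIN, fall back to a plain register read when the AQ path is unusable or the firmware is too old) can be modelled in user space as below. aq_read() and mmio_read() are stand-ins for the AdminQ command and rd32(); the firmware API numbers in main() are just example inputs.

#include <stdbool.h>
#include <stdio.h>

#define RC_OK		0
#define RC_EAGAIN	1	/* models I40E_AQ_RC_EAGAIN */

/* Stand-in for the AdminQ read command: it reports EAGAIN twice before
 * succeeding, to exercise the retry path. */
static int aq_read(unsigned int addr, unsigned int *val, int *aq_rc)
{
	static int busy = 2;

	(void)addr;
	if (busy-- > 0) {
		*aq_rc = RC_EAGAIN;
		return -1;
	}
	*aq_rc = RC_OK;
	*val = 0xabcd;
	return 0;
}

/* Stand-in for a direct MMIO read (rd32() in the driver). */
static unsigned int mmio_read(unsigned int addr)
{
	(void)addr;
	return 0xabcd;
}

/* Model of i40e_read_rx_ctl(): prefer the firmware path on firmware API
 * 1.5 or newer, retry a handful of times when the queue says EAGAIN,
 * and fall back to a plain register read if the AQ path is unusable,
 * for example before the AdminQ is brought up, as in i40e_clear_hw(). */
static unsigned int read_rx_ctl_model(unsigned int addr,
				      int api_maj, int api_min)
{
	bool use_register = (api_maj == 1) && (api_min < 5);
	unsigned int val = 0;
	int status = 0, aq_rc = RC_OK, retry = 5;

	if (!use_register) {
		do {
			status = aq_read(addr, &val, &aq_rc);
			/* the driver sleeps 1-2 ms between attempts */
		} while (status && aq_rc == RC_EAGAIN && retry--);
	}

	if (status || use_register)	/* AQ failed or too old: use MMIO */
		val = mmio_read(addr);

	return val;
}

int main(void)
{
	printf("Rx ctl value: %#x\n", read_rx_ctl_model(0x1000, 1, 5));
	return 0;
}
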
Change-ID: Ib027168c954a5733299aa3a4ce5f8218c6bb5636 Signed-off-by: Shannon Nelson Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_common.c | 2 +- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 8 ++++---- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 8 ++++---- drivers/net/ethernet/intel/i40e/i40e_main.c | 20 ++++++++++---------- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 7 ++++--- 5 files changed, 23 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 74d95999f393..4596294c2ab1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1328,7 +1328,7 @@ void i40e_clear_hw(struct i40e_hw *hw) num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >> I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT; - val = i40e_read_rx_ctl(hw, I40E_PFLAN_QALLOC); + val = rd32(hw, I40E_PFLAN_QALLOC); base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >> I40E_PFLAN_QALLOC_FIRSTQ_SHIFT; j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >> diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index f1ad10161792..a85bc94cdf87 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2181,8 +2181,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) { struct i40e_hw *hw = &pf->hw; - u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | - ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | + ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); /* RSS does not support anything other than hashing * to queues on src and dst IPs and ports @@ -2291,8 +2291,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } - wr32(hw, I40E_PFQF_HENA(0), (u32)hena); - wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_flush(hw); /* Save setting for future output/update */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c index 052df93f1da4..8ad162c16f61 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c +++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c @@ -295,11 +295,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) } /* enable FCoE hash filter */ - val = rd32(hw, I40E_PFQF_HENA(1)); + val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)); val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; - wr32(hw, I40E_PFQF_HENA(1), val); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val); /* enable flag */ pf->flags |= I40E_FLAG_FCOE_ENABLED; @@ -317,11 +317,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; /* Setup max frame with FCoE_MTU plus L2 overheads */ - val = rd32(hw, I40E_GLFCOE_RCTL); + val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL); val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK; val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT); - wr32(hw, I40E_GLFCOE_RCTL, val); + i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val); dev_info(&pf->pdev->dev, "FCoE is supported.\n"); } diff --git 
a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3ff3e83ffd92..87325dbea42c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8032,7 +8032,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]); } if (lut) { @@ -8069,7 +8069,7 @@ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, u32 *seed_dw = (u32 *)seed; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); + seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); } if (lut) { u32 *lut_dw = (u32 *)lut; @@ -8152,19 +8152,19 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) int ret; /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ - hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | - ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); + hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | + ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); hena |= i40e_pf_get_default_rss_hena(pf); - wr32(hw, I40E_PFQF_HENA(0), (u32)hena); - wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); /* Determine the RSS table size based on the hardware capabilities */ - reg_val = rd32(hw, I40E_PFQF_CTL_0); + reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0); reg_val = (pf->rss_table_size == 512) ? (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); - wr32(hw, I40E_PFQF_CTL_0, reg_val); + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val); /* Determine the RSS size of the VSI */ if (!vsi->rss_size) @@ -11211,8 +11211,8 @@ static void i40e_remove(struct pci_dev *pdev) i40e_ptp_stop(pf); /* Disable RSS in hw */ - wr32(hw, I40E_PFQF_HENA(0), 0); - wr32(hw, I40E_PFQF_HENA(1), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ set_bit(__I40E_DOWN, &pf->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 5dcd19869a41..93d8d9849c0c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -602,8 +602,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) * that VF queues be mapped using this method, even when they are * contiguous in real life */ - wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), - I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); + i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), + I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); /* enable VF vplan_qtable mappings */ reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK; @@ -630,7 +630,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) (j * 2) + 1); reg |= qid << 16; } - wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); + i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), + reg); } i40e_flush(hw); -- cgit v1.2.3 From a4618ec88de95a86f290d01c74c506552f1a5d95 Mon Sep 17 00:00:00 2001 From: Pandi Kumar Maharajan Date: Thu, 18 Feb 2016 09:19:25 -0800 Subject: i40e: suspend scheduling during driver unload We need to suspend scheduling or any pending service task during driver unload process, so that new task will not be scheduled. 
This patch sets the suspend flag bit during reload which avoids service task execution. Change-ID: I017c57b5d6656564556e3c5387da671369a572ac Signed-off-by: Pandi Kumar Maharajan Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 87325dbea42c..9076b2975554 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11215,6 +11215,7 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); /* no more scheduling of any task */ + set_bit(__I40E_SUSPENDED, &pf->state); set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); -- cgit v1.2.3 From 35f3472a750b3549f7f914ed96f41f0c2ca284f3 Mon Sep 17 00:00:00 2001 From: Mitch Williams Date: Wed, 17 Feb 2016 16:12:23 -0800 Subject: i40e: let go of the past If we reset a VF, its VSI goes away, and it gets a new one. So don't hang on to the now-stale local VSI pointer. It just leads to suffering and kernel panics. Change-ID: Ia8823b4e85893e95e963acee284968022b29177a Signed-off-by: Mitch Williams Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 93d8d9849c0c..acd2693a4e97 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2203,6 +2203,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, * and then reloading the VF driver. */ i40e_vc_disable_vf(pf, vf); + /* During reset the VF got a new VSI, so refresh the pointer. */ + vsi = pf->vsi[vf->lan_vsi_idx]; } /* Check for condition where there was already a port VLAN ID -- cgit v1.2.3 From b8f1343a13c669aaa3d475ed8513a32154ae5ffd Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Wed, 17 Feb 2016 16:12:24 -0800 Subject: i40e/i40evf: Bump i40e to 1.4.25 and i40evf to 1.4.15 Bump. Change-ID: Ifa19aadaa892ad103f1b96fe2361fa690912c6a3 Signed-off-by: Jesse Brandeburg Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/i40e/i40e_main.c | 2 +- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9076b2975554..3c8d8c4491f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -46,7 +46,7 @@ static const char i40e_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 15 +#define DRV_VERSION_BUILD 25 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 3396fe32cc6d..4b70aae2fa84 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = #define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MINOR 4 -#define DRV_VERSION_BUILD 11 +#define DRV_VERSION_BUILD 15 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ -- cgit v1.2.3 From 7c4a1d0cfdc169b746e61423ebf49b68133bc50a Mon Sep 17 00:00:00 2001 From: Sergio Prado Date: Tue, 16 Feb 2016 21:10:45 -0200 Subject: net: macb: make magic-packet property generic As requested by Rob Herring on patch https://patchwork.ozlabs.org/patch/580862/. This is a new property that it's still in net-next and has never been used in production, so we are not breaking anything with the incompatible binding change. Signed-off-by: Sergio Prado Acked-by: Rob Herring Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/macb.txt | 2 +- drivers/net/ethernet/cadence/macb.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt index c6b1cb5ffa87..b5a42df4c928 100644 --- a/Documentation/devicetree/bindings/net/macb.txt +++ b/Documentation/devicetree/bindings/net/macb.txt @@ -25,7 +25,7 @@ Required properties: Optional properties for PHY child node: - reset-gpios : Should specify the gpio for phy reset -- cdns,magic-packet : If present, indicates that the hardware supports waking +- magic-packet : If present, indicates that the hardware supports waking up via magic packet. Examples: diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 69af049e55a8..7ccf2298a5fa 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2929,7 +2929,7 @@ static int macb_probe(struct platform_device *pdev) bp->jumbo_max_len = macb_config->jumbo_max_len; bp->wol = 0; - if (of_get_property(np, "cdns,magic-packet", NULL)) + if (of_get_property(np, "magic-packet", NULL)) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); -- cgit v1.2.3 From 68f227930b887ef9f18065f15a17aa9dc1ac2d31 Mon Sep 17 00:00:00 2001 From: Padmanabh Ratnakar Date: Thu, 18 Feb 2016 03:09:34 +0530 Subject: be2net: Fix pcie error recovery in case of NIC+RoCE adapters Interrupts registered by RoCE driver are not unregistered when msix interrupts are disabled during error recovery causing a crash. Detach the adapter instance from RoCE driver when error is detected to complete the cleanup. Attach the driver again after the adapter is recovered from error. Signed-off-by: Padmanabh Ratnakar Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 88f427cb76c3..46248467e206 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -5456,6 +5456,8 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, dev_err(&adapter->pdev->dev, "EEH error detected\n"); + be_roce_dev_remove(adapter); + if (!be_check_error(adapter, BE_ERROR_EEH)) { be_set_error(adapter, BE_ERROR_EEH); @@ -5520,6 +5522,8 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; + be_roce_dev_add(adapter); + be_schedule_err_detection(adapter, ERR_DETECTION_DELAY); return; err: -- cgit v1.2.3 From 76d13b568776fdef646c70878de406c90aef46a9 Mon Sep 17 00:00:00 2001 From: "sixiao@microsoft.com" Date: Wed, 17 Feb 2016 16:43:59 -0800 Subject: hv_netvsc: add software transmit timestamp support Enable skb_tx_timestamp in hyperv netvsc. Signed-off-by: Simon Xiao Reviewed-by: K. Y. Srinivasan Reviewed-by: Haiyang Zhang Signed-off-by: David S. Miller --- drivers/net/hyperv/netvsc_drv.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index c72e5b83afdb..202e2b179509 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -550,6 +550,8 @@ do_send: packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, skb, packet, &pb); + /* timestamp packet in software */ + skb_tx_timestamp(skb); ret = netvsc_send(net_device_ctx->device_ctx, packet, rndis_msg, &pb, skb); @@ -920,6 +922,7 @@ static const struct ethtool_ops ethtool_ops = { .get_link = ethtool_op_get_link, .get_channels = netvsc_get_channels, .set_channels = netvsc_set_channels, + .get_ts_info = ethtool_op_get_ts_info, }; static const struct net_device_ops device_ops = { -- cgit v1.2.3 From a75e8005d506f374554b17383c39aa82db0ea860 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 19 Feb 2016 09:24:04 -0500 Subject: i40e: queue-specific settings for interrupt moderation For i40e driver, each vector has its own ITR register. However, there are no concept of queue-specific settings in the driver proper. Only global variable is used to store ITR values. That will cause problems especially when resetting the vector. The specific ITR values could be lost. This patch move rx_itr_setting and tx_itr_setting to i40e_ring to store specific ITR register for each queue. i40e_get_coalesce and i40e_set_coalesce are also modified accordingly to support queue-specific settings. To make it compatible with old ethtool, if user doesn't specify the queue number, i40e_get_coalesce will return queue 0's value. While i40e_set_coalesce will apply value to all queues. Signed-off-by: Kan Liang Acked-by: Shannon Nelson Acked-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/i40e/i40e.h | 7 -- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 15 ++- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 139 ++++++++++++++++--------- drivers/net/ethernet/intel/i40e/i40e_main.c | 12 +-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 9 +- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 8 ++ 6 files changed, 120 insertions(+), 70 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index e99be9f696c3..2f6210ae8ba0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -521,13 +521,6 @@ struct i40e_vsi { struct i40e_ring **tx_rings; u16 work_limit; - /* high bit set means dynamic, use accessor routines to read/write. - * hardware only supports 2us resolution for the ITR registers. - * these values always store the USER setting, and must be converted - * before programming to a register. - */ - u16 rx_itr_setting; - u16 tx_itr_setting; u16 int_rate_limit; /* value in usecs */ u16 rss_table_size; /* HW RSS table size */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 2a44f2e25a26..0c97733d253c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -302,6 +302,10 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) " rx_rings[%i]: vsi = %p, q_vector = %p\n", i, rx_ring->vsi, rx_ring->q_vector); + dev_info(&pf->pdev->dev, + " rx_rings[%i]: rx_itr_setting = %d (%s)\n", + i, rx_ring->rx_itr_setting, + ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); @@ -352,14 +356,15 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " tx_rings[%i]: DCB tc = %d\n", i, tx_ring->dcb_tc); + dev_info(&pf->pdev->dev, + " tx_rings[%i]: tx_itr_setting = %d (%s)\n", + i, tx_ring->tx_itr_setting, + ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? "dynamic" : "fixed"); } rcu_read_unlock(); dev_info(&pf->pdev->dev, - " work_limit = %d, rx_itr_setting = %d (%s), tx_itr_setting = %d (%s)\n", - vsi->work_limit, vsi->rx_itr_setting, - ITR_IS_DYNAMIC(vsi->rx_itr_setting) ? "dynamic" : "fixed", - vsi->tx_itr_setting, - ITR_IS_DYNAMIC(vsi->tx_itr_setting) ? 
"dynamic" : "fixed"); + " work_limit = %d\n", + vsi->work_limit); dev_info(&pf->pdev->dev, " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n", vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a85bc94cdf87..a4705999cbc9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1879,8 +1879,9 @@ static int i40e_set_phys_id(struct net_device *netdev, * 125us (8000 interrupts per second) == ITR(62) */ -static int i40e_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int __i40e_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + int queue) { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; @@ -1888,14 +1889,24 @@ static int i40e_get_coalesce(struct net_device *netdev, ec->tx_max_coalesced_frames_irq = vsi->work_limit; ec->rx_max_coalesced_frames_irq = vsi->work_limit; - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) + /* rx and tx usecs has per queue value. If user doesn't specify the queue, + * return queue 0's value to represent. + */ + if (queue < 0) { + queue = 0; + } else if (queue >= vsi->num_queue_pairs) { + return -EINVAL; + } + + if (ITR_IS_DYNAMIC(vsi->rx_rings[queue]->rx_itr_setting)) ec->use_adaptive_rx_coalesce = 1; - if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) + if (ITR_IS_DYNAMIC(vsi->tx_rings[queue]->tx_itr_setting)) ec->use_adaptive_tx_coalesce = 1; - ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC; - ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->rx_coalesce_usecs = vsi->rx_rings[queue]->rx_itr_setting & ~I40E_ITR_DYNAMIC; + ec->tx_coalesce_usecs = vsi->tx_rings[queue]->tx_itr_setting & ~I40E_ITR_DYNAMIC; + /* we use the _usecs_high to store/set the interrupt rate limit * that the hardware supports, that almost but not quite * fits the original intent of the ethtool variable, @@ -1908,15 +1919,57 @@ static int i40e_get_coalesce(struct net_device *netdev, return 0; } -static int i40e_set_coalesce(struct net_device *netdev, +static int i40e_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) { - struct i40e_netdev_priv *np = netdev_priv(netdev); + return __i40e_get_coalesce(netdev, ec, -1); +} + +static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40e_pf *pf = vsi->back; + struct i40e_hw *hw = &pf->hw; struct i40e_q_vector *q_vector; + u16 vector, intrl; + + intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); + + vsi->rx_rings[queue]->rx_itr_setting = ec->rx_coalesce_usecs; + vsi->tx_rings[queue]->tx_itr_setting = ec->tx_coalesce_usecs; + + if (ec->use_adaptive_rx_coalesce) + vsi->rx_rings[queue]->rx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->rx_rings[queue]->rx_itr_setting &= ~I40E_ITR_DYNAMIC; + + if (ec->use_adaptive_tx_coalesce) + vsi->tx_rings[queue]->tx_itr_setting |= I40E_ITR_DYNAMIC; + else + vsi->tx_rings[queue]->tx_itr_setting &= ~I40E_ITR_DYNAMIC; + + q_vector = vsi->rx_rings[queue]->q_vector; + q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[queue]->rx_itr_setting); + vector = vsi->base_vector + q_vector->v_idx; + wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); + + q_vector = vsi->tx_rings[queue]->q_vector; + q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[queue]->tx_itr_setting); + vector = vsi->base_vector + q_vector->v_idx; + wr32(hw, 
I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); + + wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); + i40e_flush(hw); +} + +static int __i40e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + int queue) +{ + struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; - struct i40e_hw *hw = &pf->hw; - u16 vector; int i; if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) @@ -1933,59 +1986,49 @@ static int i40e_set_coalesce(struct net_device *netdev, return -EINVAL; } - vector = vsi->base_vector; - if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && - (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { - vsi->rx_itr_setting = ec->rx_coalesce_usecs; - } else if (ec->rx_coalesce_usecs == 0) { - vsi->rx_itr_setting = ec->rx_coalesce_usecs; + if (ec->rx_coalesce_usecs == 0) { if (ec->use_adaptive_rx_coalesce) netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); - } else { - netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); - return -EINVAL; + } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) || + (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) { + netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); + return -EINVAL; } vsi->int_rate_limit = ec->rx_coalesce_usecs_high; - if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) && - (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) { - vsi->tx_itr_setting = ec->tx_coalesce_usecs; - } else if (ec->tx_coalesce_usecs == 0) { - vsi->tx_itr_setting = ec->tx_coalesce_usecs; + if (ec->tx_coalesce_usecs == 0) { if (ec->use_adaptive_tx_coalesce) netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); - } else { - netif_info(pf, drv, netdev, - "Invalid value, tx-usecs range is 0-8160\n"); - return -EINVAL; + } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) || + (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) { + netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); + return -EINVAL; } - if (ec->use_adaptive_rx_coalesce) - vsi->rx_itr_setting |= I40E_ITR_DYNAMIC; - else - vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC; - - if (ec->use_adaptive_tx_coalesce) - vsi->tx_itr_setting |= I40E_ITR_DYNAMIC; - else - vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC; - - for (i = 0; i < vsi->num_q_vectors; i++, vector++) { - u16 intrl = INTRL_USEC_TO_REG(vsi->int_rate_limit); - - q_vector = vsi->q_vectors[i]; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); - wr32(hw, I40E_PFINT_ITRN(0, vector - 1), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); - wr32(hw, I40E_PFINT_ITRN(1, vector - 1), q_vector->tx.itr); - wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl); - i40e_flush(hw); + /* rx and tx usecs has per queue value. If user doesn't specify the queue, + * apply to all queues. 
+ */ + if (queue < 0) { + for (i = 0; i < vsi->num_queue_pairs; i++) + i40e_set_itr_per_queue(vsi, ec, i); + } else if (queue < vsi->num_queue_pairs) { + i40e_set_itr_per_queue(vsi, ec, queue); + } else { + netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", + vsi->num_queue_pairs - 1); + return -EINVAL; } return 0; } +static int i40e_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + return __i40e_set_coalesce(netdev, ec, -1); +} + /** * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type * @pf: pointer to the physical function struct diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 3c8d8c4491f8..70d9605a0d9e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3124,11 +3124,11 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi) struct i40e_q_vector *q_vector = vsi->q_vectors[i]; q_vector->itr_countdown = ITR_COUNTDOWN_START; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting); q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr); @@ -3220,10 +3220,10 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi) /* set the ITR configuration */ q_vector->itr_countdown = ITR_COUNTDOWN_START; - q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting); + q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting); q_vector->rx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr); - q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting); + q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting); q_vector->tx.latency_range = I40E_LOW_LATENCY; wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr); @@ -7322,8 +7322,6 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) set_bit(__I40E_DOWN, &vsi->state); vsi->flags = 0; vsi->idx = vsi_idx; - vsi->rx_itr_setting = pf->rx_itr_default; - vsi->tx_itr_setting = pf->tx_itr_default; vsi->int_rate_limit = 0; vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ? 
pf->rss_table_size : 64; @@ -7490,6 +7488,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) tx_ring->dcb_tc = 0; if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; + tx_ring->tx_itr_setting = pf->tx_itr_default; vsi->tx_rings[i] = tx_ring; rx_ring = &tx_ring[1]; @@ -7506,6 +7505,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) set_ring_16byte_desc_enabled(rx_ring); else clear_ring_16byte_desc_enabled(rx_ring); + rx_ring->rx_itr_setting = pf->rx_itr_default; vsi->rx_rings[i] = rx_ring; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index cb52f39d514a..084d0ab316b7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1882,6 +1882,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, bool rx = false, tx = false; u32 rxval, txval; int vector; + int idx = q_vector->v_idx; vector = (q_vector->v_idx + vsi->base_vector); @@ -1891,17 +1892,17 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0); if (q_vector->itr_countdown > 0 || - (!ITR_IS_DYNAMIC(vsi->rx_itr_setting) && - !ITR_IS_DYNAMIC(vsi->tx_itr_setting))) { + (!ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting) && + !ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting))) { goto enable_int; } - if (ITR_IS_DYNAMIC(vsi->rx_itr_setting)) { + if (ITR_IS_DYNAMIC(vsi->rx_rings[idx]->rx_itr_setting)) { rx = i40e_set_new_dynamic_itr(&q_vector->rx); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); } - if (ITR_IS_DYNAMIC(vsi->tx_itr_setting)) { + if (ITR_IS_DYNAMIC(vsi->tx_rings[idx]->tx_itr_setting)) { tx = i40e_set_new_dynamic_itr(&q_vector->tx); txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 56009709528a..cdd5dc00aec5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -248,6 +248,14 @@ struct i40e_ring { u8 dcb_tc; /* Traffic class of ring */ u8 __iomem *tail; + /* high bit set means dynamic, use accessor routines to read/write. + * hardware only supports 2us resolution for the ITR registers. + * these values always store the USER setting, and must be converted + * before programming to a register. + */ + u16 rx_itr_setting; + u16 tx_itr_setting; + u16 count; /* Number of descriptors */ u16 reg_idx; /* HW register index of the ring */ u16 rx_hdr_len; -- cgit v1.2.3 From be280bad15fafc0f7e7b90bdbd99170366f5e9bf Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 19 Feb 2016 09:24:05 -0500 Subject: i40e/ethtool: support coalesce getting by queue This patch implements get_per_queue_coalesce for i40e driver. Signed-off-by: Kan Liang Acked-by: Jeff Kirsher Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index a4705999cbc9..dd572abddb8e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1925,6 +1925,12 @@ static int i40e_get_coalesce(struct net_device *netdev, return __i40e_get_coalesce(netdev, ec, -1); } +static int i40e_get_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __i40e_get_coalesce(netdev, ec, queue); +} + static void i40e_set_itr_per_queue(struct i40e_vsi *vsi, struct ethtool_coalesce *ec, int queue) @@ -2914,6 +2920,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_ts_info = i40e_get_ts_info, .get_priv_flags = i40e_get_priv_flags, .set_priv_flags = i40e_set_priv_flags, + .get_per_queue_coalesce = i40e_get_per_queue_coalesce, }; void i40e_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From f3757a4d9e823c43ccfe4db02b6cda77414e25f7 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 19 Feb 2016 09:24:06 -0500 Subject: i40e/ethtool: support coalesce setting by queue This patch implements set_per_queue_coalesce for i40e driver. Signed-off-by: Kan Liang Acked-by: Jeff Kirsher Signed-off-by: David S. Miller --- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index dd572abddb8e..784b1659457a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2035,6 +2035,12 @@ static int i40e_set_coalesce(struct net_device *netdev, return __i40e_set_coalesce(netdev, ec, -1); } +static int i40e_set_per_queue_coalesce(struct net_device *netdev, u32 queue, + struct ethtool_coalesce *ec) +{ + return __i40e_set_coalesce(netdev, ec, queue); +} + /** * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type * @pf: pointer to the physical function struct @@ -2921,6 +2927,7 @@ static const struct ethtool_ops i40e_ethtool_ops = { .get_priv_flags = i40e_get_priv_flags, .set_priv_flags = i40e_set_priv_flags, .get_per_queue_coalesce = i40e_get_per_queue_coalesce, + .set_per_queue_coalesce = i40e_set_per_queue_coalesce, }; void i40e_set_ethtool_ops(struct net_device *netdev) -- cgit v1.2.3 From 321b4d4bd12f90e7497c9ab057aafcc2649aa902 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 20 Feb 2016 00:35:29 +0100 Subject: phy: marvell/micrel: Fix Unpossible condition commit 2b2427d06426 ("phy: micrel: Add ethtool statistics counters") from Dec 30, 2015, leads to the following static checker warning: drivers/net/phy/micrel.c:609 kszphy_get_stat() warn: unsigned 'val' is never less than zero. drivers/net/phy/micrel.c 602 static u64 kszphy_get_stat(struct phy_device *phydev, int i) 603 { 604 struct kszphy_hw_stat stat = kszphy_hw_stats[i]; 605 struct kszphy_priv *priv = phydev->priv; 606 u64 val; 607 608 val = phy_read(phydev, stat.reg); 609 if (val < 0) { ^^^^^^^ Unpossible! 610 val = UINT64_MAX; 611 } else { 612 val = val & ((1 << stat.bits) - 1); 613 priv->stats[i] += val; 614 val = priv->stats[i]; 615 } 616 617 return val; 618 } The same problem exists in the Marvell driver. Fix both. 
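For clarity, the corrected pattern looks roughly like the following (a minimal sketch, not the exact driver code; the helper name and parameters are illustrative). phy_read() returns an int that is negative on error, so the error check has to be performed on a signed variable before the value is folded into the 64-bit statistics counter:

static u64 example_get_stat(struct phy_device *phydev, int reg, int bits,
			    u64 *accumulator)
{
	int val;	/* signed, so "val < 0" can actually detect a failed read */
	u64 ret;

	val = phy_read(phydev, reg);
	if (val < 0) {
		ret = UINT64_MAX;		/* flag the failed read */
	} else {
		*accumulator += val & ((1 << bits) - 1);
		ret = *accumulator;
	}

	return ret;
}
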
Fixes: 2b2427d06426 ("phy: micrel: Add ethtool statistics counters") Reported-by: Dan Carpenter Reported-by: Julia.Lawall Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 10 +++++----- drivers/net/phy/micrel.c | 9 +++++---- 2 files changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 1dcbd3ff9e38..d0168f1a1bf0 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1065,8 +1065,8 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) { struct marvell_hw_stat stat = marvell_hw_stats[i]; struct marvell_priv *priv = phydev->priv; - int err, oldpage; - u64 val; + int err, oldpage, val; + u64 ret; oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE); err = phy_write(phydev, MII_MARVELL_PHY_PAGE, @@ -1076,16 +1076,16 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i) val = phy_read(phydev, stat.reg); if (val < 0) { - val = UINT64_MAX; + ret = UINT64_MAX; } else { val = val & ((1 << stat.bits) - 1); priv->stats[i] += val; - val = priv->stats[i]; + ret = priv->stats[i]; } phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage); - return val; + return ret; } static void marvell_get_stats(struct phy_device *phydev, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 03833dbfca67..48219c83fb00 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -612,18 +612,19 @@ static u64 kszphy_get_stat(struct phy_device *phydev, int i) { struct kszphy_hw_stat stat = kszphy_hw_stats[i]; struct kszphy_priv *priv = phydev->priv; - u64 val; + int val; + u64 ret; val = phy_read(phydev, stat.reg); if (val < 0) { - val = UINT64_MAX; + ret = UINT64_MAX; } else { val = val & ((1 << stat.bits) - 1); priv->stats[i] += val; - val = priv->stats[i]; + ret = priv->stats[i]; } - return val; + return ret; } static void kszphy_get_stats(struct phy_device *phydev, -- cgit v1.2.3 From f49cbe6b7988b51aa2b72c45d4332fabea62fba6 Mon Sep 17 00:00:00 2001 From: Damien Riegel Date: Tue, 12 Jan 2016 17:31:18 -0500 Subject: can: sja1000: of: add per-compatible init hook This commit adds the capability to allocate and init private data embedded in the sja1000_priv structure on a per-compatible basis. The device node is passed as a parameter of the init callback to allow parsing of custom device tree properties. 
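As an illustration of how a variant driver would plug into this hook (hypothetical names, not part of this patch), a compatible entry supplies a sja1000_of_data carrying the size of its private area and an init callback that can parse its own device tree properties:

struct foo_priv {
	u32 window_offset;		/* example of board-specific state */
};

static int foo_init(struct sja1000_priv *priv, struct device_node *of)
{
	struct foo_priv *fp = priv->priv;	/* area allocated via priv_sz */

	/* read custom DT properties from 'of' and set up fp here */
	fp->window_offset = 0;

	return 0;
}

static const struct sja1000_of_data foo_data = {
	.priv_sz = sizeof(struct foo_priv),
	.init = foo_init,
};

/* referenced from the match table as:
 *	{ .compatible = "vendor,foo-sja1000", .data = &foo_data },
 */
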
Signed-off-by: Damien Riegel Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/sja1000_platform.c | 40 +++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 0552ed46a206..777d312f1779 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include "sja1000.h" @@ -40,6 +41,11 @@ MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); MODULE_ALIAS("platform:" DRV_NAME); MODULE_LICENSE("GPL v2"); +struct sja1000_of_data { + size_t priv_sz; + int (*init)(struct sja1000_priv *priv, struct device_node *of); +}; + static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg); @@ -154,6 +160,12 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of) priv->cdr |= CDR_CBP; /* default */ } +static const struct of_device_id sp_of_table[] = { + { .compatible = "nxp,sja1000", .data = NULL, }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, sp_of_table); + static int sp_probe(struct platform_device *pdev) { int err, irq = 0; @@ -163,6 +175,9 @@ static int sp_probe(struct platform_device *pdev) struct resource *res_mem, *res_irq = NULL; struct sja1000_platform_data *pdata; struct device_node *of = pdev->dev.of_node; + const struct of_device_id *of_id; + const struct sja1000_of_data *of_data = NULL; + size_t priv_sz = 0; pdata = dev_get_platdata(&pdev->dev); if (!pdata && !of) { @@ -191,7 +206,13 @@ static int sp_probe(struct platform_device *pdev) if (!irq && !res_irq) return -ENODEV; - dev = alloc_sja1000dev(0); + of_id = of_match_device(sp_of_table, &pdev->dev); + if (of_id && of_id->data) { + of_data = of_id->data; + priv_sz = of_data->priv_sz; + } + + dev = alloc_sja1000dev(priv_sz); if (!dev) return -ENOMEM; priv = netdev_priv(dev); @@ -208,10 +229,17 @@ static int sp_probe(struct platform_device *pdev) dev->irq = irq; priv->reg_base = addr; - if (of) + if (of) { sp_populate_of(priv, of); - else + + if (of_data && of_data->init) { + err = of_data->init(priv, of); + if (err) + goto exit_free; + } + } else { sp_populate(priv, pdata, res_mem->flags); + } platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); @@ -242,12 +270,6 @@ static int sp_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id sp_of_table[] = { - {.compatible = "nxp,sja1000"}, - {}, -}; -MODULE_DEVICE_TABLE(of, sp_of_table); - static struct platform_driver sp_driver = { .probe = sp_probe, .remove = sp_remove, -- cgit v1.2.3 From dfb86c0d5a9d1c068954d776bf66e3d0b27beb68 Mon Sep 17 00:00:00 2001 From: Damien Riegel Date: Tue, 12 Jan 2016 17:31:20 -0500 Subject: can: sja1000: of: add compatibility with Technologic Systems version Technologic Systems provides an IP compatible with the SJA1000, instantiated in an FPGA. Because of some bus widths issue, access to registers is made through a "window" that works like this: base + 0x0: address to read/write base + 0x2: 8-bit register value This commit adds a new compatible device, "technologic,sja1000", with read and write functions using the window mechanism. 
Signed-off-by: Damien Riegel Signed-off-by: Marc Kleine-Budde --- drivers/net/can/sja1000/sja1000_platform.c | 47 ++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'drivers') diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c index 777d312f1779..dc9c6db96c3c 100644 --- a/drivers/net/can/sja1000/sja1000_platform.c +++ b/drivers/net/can/sja1000/sja1000_platform.c @@ -46,6 +46,10 @@ struct sja1000_of_data { int (*init)(struct sja1000_priv *priv, struct device_node *of); }; +struct technologic_priv { + spinlock_t io_lock; +}; + static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg); @@ -76,6 +80,43 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val) iowrite8(val, priv->reg_base + reg * 4); } +static u8 sp_technologic_read_reg16(const struct sja1000_priv *priv, int reg) +{ + struct technologic_priv *tp = priv->priv; + unsigned long flags; + u8 val; + + spin_lock_irqsave(&tp->io_lock, flags); + iowrite16(reg, priv->reg_base + 0); + val = ioread16(priv->reg_base + 2); + spin_unlock_irqrestore(&tp->io_lock, flags); + + return val; +} + +static void sp_technologic_write_reg16(const struct sja1000_priv *priv, + int reg, u8 val) +{ + struct technologic_priv *tp = priv->priv; + unsigned long flags; + + spin_lock_irqsave(&tp->io_lock, flags); + iowrite16(reg, priv->reg_base + 0); + iowrite16(val, priv->reg_base + 2); + spin_unlock_irqrestore(&tp->io_lock, flags); +} + +static int sp_technologic_init(struct sja1000_priv *priv, struct device_node *of) +{ + struct technologic_priv *tp = priv->priv; + + priv->read_reg = sp_technologic_read_reg16; + priv->write_reg = sp_technologic_write_reg16; + spin_lock_init(&tp->io_lock); + + return 0; +} + static void sp_populate(struct sja1000_priv *priv, struct sja1000_platform_data *pdata, unsigned long resource_mem_flags) @@ -160,8 +201,14 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of) priv->cdr |= CDR_CBP; /* default */ } +static struct sja1000_of_data technologic_data = { + .priv_sz = sizeof(struct technologic_priv), + .init = sp_technologic_init, +}; + static const struct of_device_id sp_of_table[] = { { .compatible = "nxp,sja1000", .data = NULL, }, + { .compatible = "technologic,sja1000", .data = &technologic_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, sp_of_table); -- cgit v1.2.3 From 83407c7f42b3ad13ba55211779c7267cd4819ad8 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 11 Jan 2016 19:47:59 +0100 Subject: can: Kconfig: Sort the Kconfig includes Sort the Kconfig includes, no functional change. Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/Kconfig | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 6d04183ed955..6554404b04d3 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -147,22 +147,16 @@ config CAN_XILINXCAN Xilinx CAN driver. This driver supports both soft AXI CAN IP and Zynq CANPS IP. 
-source "drivers/net/can/mscan/Kconfig" - -source "drivers/net/can/sja1000/Kconfig" source "drivers/net/can/c_can/Kconfig" - -source "drivers/net/can/m_can/Kconfig" - source "drivers/net/can/cc770/Kconfig" - +source "drivers/net/can/m_can/Kconfig" +source "drivers/net/can/mscan/Kconfig" +source "drivers/net/can/sja1000/Kconfig" +source "drivers/net/can/softing/Kconfig" source "drivers/net/can/spi/Kconfig" - source "drivers/net/can/usb/Kconfig" -source "drivers/net/can/softing/Kconfig" - endif config CAN_DEBUG_DEVICES -- cgit v1.2.3 From 26821162f958c6211427bc0a12fd1412a0d99e49 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 16 Feb 2016 09:34:38 +0100 Subject: can: Kconfig: sort drivers alphabetically Sort the drivers that are directly listed in the Kconfig alphabetically, no functional change. Signed-off-by: Marc Kleine-Budde --- drivers/net/can/Kconfig | 49 ++++++++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 6554404b04d3..b77ef32a2fe2 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -70,13 +70,6 @@ config CAN_AT91 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263 and AT91SAM9X5 processors. -config CAN_TI_HECC - depends on ARM - tristate "TI High End CAN Controller" - ---help--- - Driver for TI HECC (High End CAN Controller) module found on many - TI devices. The device specifications are available from www.ti.com - config CAN_BFIN depends on BF534 || BF536 || BF537 || BF538 || BF539 || BF54x tristate "Analog Devices Blackfin on-chip CAN" @@ -86,30 +79,12 @@ config CAN_BFIN To compile this driver as a module, choose M here: the module will be called bfin_can. -config CAN_JANZ_ICAN3 - tristate "Janz VMOD-ICAN3 Intelligent CAN controller" - depends on MFD_JANZ_CMODIO - ---help--- - Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which - connects to a MODULbus carrier board. - - This driver can also be built as a module. If so, the module will be - called janz-ican3.ko. - config CAN_FLEXCAN tristate "Support for Freescale FLEXCAN based chips" depends on ARM || PPC ---help--- Say Y here if you want to support for Freescale FlexCAN. -config PCH_CAN - tristate "Intel EG20T PCH CAN controller" - depends on PCI && (X86_32 || COMPILE_TEST) - ---help--- - This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which - is an IOH for x86 embedded processor (Intel Atom E6xx series). - This driver can access CAN bus. - config CAN_GRCAN tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices" depends on OF && HAS_DMA @@ -119,6 +94,16 @@ config CAN_GRCAN endian syntheses of the cores would need some modifications on the hardware level to work. +config CAN_JANZ_ICAN3 + tristate "Janz VMOD-ICAN3 Intelligent CAN controller" + depends on MFD_JANZ_CMODIO + ---help--- + Driver for Janz VMOD-ICAN3 Intelligent CAN controller module, which + connects to a MODULbus carrier board. + + This driver can also be built as a module. If so, the module will be + called janz-ican3.ko. + config CAN_RCAR tristate "Renesas R-Car CAN controller" depends on ARM @@ -139,6 +124,13 @@ config CAN_SUN4I To compile this driver as a module, choose M here: the module will be called sun4i_can. +config CAN_TI_HECC + depends on ARM + tristate "TI High End CAN Controller" + ---help--- + Driver for TI HECC (High End CAN Controller) module found on many + TI devices. 
The device specifications are available from www.ti.com + config CAN_XILINXCAN tristate "Xilinx CAN" depends on ARCH_ZYNQ || ARM64 || MICROBLAZE || COMPILE_TEST @@ -147,6 +139,13 @@ config CAN_XILINXCAN Xilinx CAN driver. This driver supports both soft AXI CAN IP and Zynq CANPS IP. +config PCH_CAN + tristate "Intel EG20T PCH CAN controller" + depends on PCI && (X86_32 || COMPILE_TEST) + ---help--- + This driver is for PCH CAN of Topcliff (Intel EG20T PCH) which + is an IOH for x86 embedded processor (Intel Atom E6xx series). + This driver can access CAN bus. source "drivers/net/can/c_can/Kconfig" source "drivers/net/can/cc770/Kconfig" -- cgit v1.2.3 From 47338082066752f68562d51bbe2e36b501a1ce33 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 11 Jan 2016 19:48:00 +0100 Subject: can: Makefile: Sort the Makefile Just sort the drivers in the Makefile, no functional change. Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/Makefile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 1f21cef1d458..4f85c2bc95b6 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -14,21 +14,21 @@ obj-y += spi/ obj-y += usb/ obj-y += softing/ -obj-$(CONFIG_CAN_SJA1000) += sja1000/ -obj-$(CONFIG_CAN_MSCAN) += mscan/ -obj-$(CONFIG_CAN_C_CAN) += c_can/ -obj-$(CONFIG_CAN_M_CAN) += m_can/ -obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_AT91) += at91_can.o -obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_BFIN) += bfin_can.o -obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_CC770) += cc770/ +obj-$(CONFIG_CAN_C_CAN) += c_can/ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o -obj-$(CONFIG_PCH_CAN) += pch_can.o obj-$(CONFIG_CAN_GRCAN) += grcan.o +obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o +obj-$(CONFIG_CAN_MSCAN) += mscan/ +obj-$(CONFIG_CAN_M_CAN) += m_can/ obj-$(CONFIG_CAN_RCAR) += rcar_can.o +obj-$(CONFIG_CAN_SJA1000) += sja1000/ obj-$(CONFIG_CAN_SUN4I) += sun4i_can.o +obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o obj-$(CONFIG_CAN_XILINXCAN) += xilinx_can.o +obj-$(CONFIG_PCH_CAN) += pch_can.o subdir-ccflags-y += -D__CHECK_ENDIAN__ subdir-ccflags-$(CONFIG_CAN_DEBUG_DEVICES) += -DDEBUG -- cgit v1.2.3 From 0c4d9c94a1303008b32079991360cea7ab849ef3 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Wed, 20 Jan 2016 15:33:39 +0100 Subject: can: ifi: Add IFI CANFD IP support The patch adds support for IFI CAN/FD controller [1]. This driver currently supports sending and receiving both standard CAN and new CAN/FD frames. Both ISO and BOSCH variant of CAN/FD is supported. 
[1] http://www.ifi-pld.de/IP/CANFD/canfd.html Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Signed-off-by: Marc Kleine-Budde --- drivers/net/can/Kconfig | 1 + drivers/net/can/Makefile | 1 + drivers/net/can/ifi_canfd/Kconfig | 8 + drivers/net/can/ifi_canfd/Makefile | 5 + drivers/net/can/ifi_canfd/ifi_canfd.c | 917 ++++++++++++++++++++++++++++++++++ 5 files changed, 932 insertions(+) create mode 100644 drivers/net/can/ifi_canfd/Kconfig create mode 100644 drivers/net/can/ifi_canfd/Makefile create mode 100644 drivers/net/can/ifi_canfd/ifi_canfd.c (limited to 'drivers') diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index b77ef32a2fe2..164ccdeca663 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -149,6 +149,7 @@ config PCH_CAN source "drivers/net/can/c_can/Kconfig" source "drivers/net/can/cc770/Kconfig" +source "drivers/net/can/ifi_canfd/Kconfig" source "drivers/net/can/m_can/Kconfig" source "drivers/net/can/mscan/Kconfig" source "drivers/net/can/sja1000/Kconfig" diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile index 4f85c2bc95b6..e3db0c807f55 100644 --- a/drivers/net/can/Makefile +++ b/drivers/net/can/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_CAN_CC770) += cc770/ obj-$(CONFIG_CAN_C_CAN) += c_can/ obj-$(CONFIG_CAN_FLEXCAN) += flexcan.o obj-$(CONFIG_CAN_GRCAN) += grcan.o +obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd/ obj-$(CONFIG_CAN_JANZ_ICAN3) += janz-ican3.o obj-$(CONFIG_CAN_MSCAN) += mscan/ obj-$(CONFIG_CAN_M_CAN) += m_can/ diff --git a/drivers/net/can/ifi_canfd/Kconfig b/drivers/net/can/ifi_canfd/Kconfig new file mode 100644 index 000000000000..9e8934ff63a7 --- /dev/null +++ b/drivers/net/can/ifi_canfd/Kconfig @@ -0,0 +1,8 @@ +config CAN_IFI_CANFD + depends on HAS_IOMEM + tristate "IFI CAN_FD IP" + ---help--- + This driver adds support for the I/F/I CAN_FD soft IP block + connected to the "platform bus" (Linux abstraction for directly + to the processor attached devices). The CAN_FD is most often + synthesised into an FPGA or CPLD. diff --git a/drivers/net/can/ifi_canfd/Makefile b/drivers/net/can/ifi_canfd/Makefile new file mode 100644 index 000000000000..b229960cdf39 --- /dev/null +++ b/drivers/net/can/ifi_canfd/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the IFI CANFD controller driver. +# + +obj-$(CONFIG_CAN_IFI_CANFD) += ifi_canfd.o diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c new file mode 100644 index 000000000000..0d1c164374b7 --- /dev/null +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -0,0 +1,917 @@ +/* + * CAN bus driver for IFI CANFD controller + * + * Copyright (C) 2016 Marek Vasut + * + * Details about this controller can be found at + * http://www.ifi-pld.de/IP/CANFD/canfd.html + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define IFI_CANFD_STCMD 0x0 +#define IFI_CANFD_STCMD_HARDRESET 0xDEADCAFD +#define IFI_CANFD_STCMD_ENABLE BIT(0) +#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) +#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) +#define IFI_CANFD_STCMD_BUSOFF BIT(4) +#define IFI_CANFD_STCMD_BUSMONITOR BIT(16) +#define IFI_CANFD_STCMD_LOOPBACK BIT(18) +#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) +#define IFI_CANFD_STCMD_ENABLE_ISO BIT(25) +#define IFI_CANFD_STCMD_NORMAL_MODE ((u32)BIT(31)) + +#define IFI_CANFD_RXSTCMD 0x4 +#define IFI_CANFD_RXSTCMD_REMOVE_MSG BIT(0) +#define IFI_CANFD_RXSTCMD_RESET BIT(7) +#define IFI_CANFD_RXSTCMD_EMPTY BIT(8) +#define IFI_CANFD_RXSTCMD_OVERFLOW BIT(13) + +#define IFI_CANFD_TXSTCMD 0x8 +#define IFI_CANFD_TXSTCMD_ADD_MSG BIT(0) +#define IFI_CANFD_TXSTCMD_HIGH_PRIO BIT(1) +#define IFI_CANFD_TXSTCMD_RESET BIT(7) +#define IFI_CANFD_TXSTCMD_EMPTY BIT(8) +#define IFI_CANFD_TXSTCMD_FULL BIT(12) +#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) + +#define IFI_CANFD_INTERRUPT 0xc +#define IFI_CANFD_INTERRUPT_ERROR_WARNING ((u32)BIT(1)) +#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) +#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) +#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY BIT(24) +#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER BIT(25) +#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) + +#define IFI_CANFD_IRQMASK 0x10 +#define IFI_CANFD_IRQMASK_SET_ERR BIT(7) +#define IFI_CANFD_IRQMASK_SET_TS BIT(15) +#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) +#define IFI_CANFD_IRQMASK_SET_TX BIT(23) +#define IFI_CANFD_IRQMASK_RXFIFO_NEMPTY BIT(24) +#define IFI_CANFD_IRQMASK_SET_RX ((u32)BIT(31)) + +#define IFI_CANFD_TIME 0x14 +#define IFI_CANFD_FTIME 0x18 +#define IFI_CANFD_TIME_TIMEB_OFF 0 +#define IFI_CANFD_TIME_TIMEA_OFF 8 +#define IFI_CANFD_TIME_PRESCALE_OFF 16 +#define IFI_CANFD_TIME_SJW_OFF_ISO 25 +#define IFI_CANFD_TIME_SJW_OFF_BOSCH 28 +#define IFI_CANFD_TIME_SET_SJW_BOSCH BIT(6) +#define IFI_CANFD_TIME_SET_TIMEB_BOSCH BIT(7) +#define IFI_CANFD_TIME_SET_PRESC_BOSCH BIT(14) +#define IFI_CANFD_TIME_SET_TIMEA_BOSCH BIT(15) + +#define IFI_CANFD_TDELAY 0x1c + +#define IFI_CANFD_ERROR 0x20 +#define IFI_CANFD_ERROR_TX_OFFSET 0 +#define IFI_CANFD_ERROR_TX_MASK 0xff +#define IFI_CANFD_ERROR_RX_OFFSET 16 +#define IFI_CANFD_ERROR_RX_MASK 0xff + +#define IFI_CANFD_ERRCNT 0x24 + +#define IFI_CANFD_SUSPEND 0x28 + +#define IFI_CANFD_REPEAT 0x2c + +#define IFI_CANFD_TRAFFIC 0x30 + +#define IFI_CANFD_TSCONTROL 0x34 + +#define IFI_CANFD_TSC 0x38 + +#define IFI_CANFD_TST 0x3c + +#define IFI_CANFD_RES1 0x40 + +#define IFI_CANFD_RES2 0x44 + +#define IFI_CANFD_PAR 0x48 + +#define IFI_CANFD_CANCLOCK 0x4c + +#define IFI_CANFD_SYSCLOCK 0x50 + +#define IFI_CANFD_VER 0x54 + +#define IFI_CANFD_IP_ID 0x58 +#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD + +#define IFI_CANFD_TEST 0x5c + +#define IFI_CANFD_RXFIFO_TS_63_32 0x60 + +#define IFI_CANFD_RXFIFO_TS_31_0 0x64 + +#define IFI_CANFD_RXFIFO_DLC 0x68 +#define IFI_CANFD_RXFIFO_DLC_DLC_OFFSET 0 +#define IFI_CANFD_RXFIFO_DLC_DLC_MASK 0xf +#define IFI_CANFD_RXFIFO_DLC_RTR BIT(4) +#define IFI_CANFD_RXFIFO_DLC_EDL BIT(5) +#define IFI_CANFD_RXFIFO_DLC_BRS BIT(6) +#define IFI_CANFD_RXFIFO_DLC_ESI BIT(7) +#define IFI_CANFD_RXFIFO_DLC_OBJ_OFFSET 8 +#define IFI_CANFD_RXFIFO_DLC_OBJ_MASK 0x1ff +#define IFI_CANFD_RXFIFO_DLC_FNR_OFFSET 24 +#define IFI_CANFD_RXFIFO_DLC_FNR_MASK 0xff + +#define IFI_CANFD_RXFIFO_ID 0x6c +#define IFI_CANFD_RXFIFO_ID_ID_OFFSET 0 +#define 
IFI_CANFD_RXFIFO_ID_ID_STD_MASK 0x3ff +#define IFI_CANFD_RXFIFO_ID_ID_XTD_MASK 0x1fffffff +#define IFI_CANFD_RXFIFO_ID_IDE BIT(29) + +#define IFI_CANFD_RXFIFO_DATA 0x70 /* 0x70..0xac */ + +#define IFI_CANFD_TXFIFO_SUSPEND_US 0xb0 + +#define IFI_CANFD_TXFIFO_REPEATCOUNT 0xb4 + +#define IFI_CANFD_TXFIFO_DLC 0xb8 +#define IFI_CANFD_TXFIFO_DLC_DLC_OFFSET 0 +#define IFI_CANFD_TXFIFO_DLC_DLC_MASK 0xf +#define IFI_CANFD_TXFIFO_DLC_RTR BIT(4) +#define IFI_CANFD_TXFIFO_DLC_EDL BIT(5) +#define IFI_CANFD_TXFIFO_DLC_BRS BIT(6) +#define IFI_CANFD_TXFIFO_DLC_FNR_OFFSET 24 +#define IFI_CANFD_TXFIFO_DLC_FNR_MASK 0xff + +#define IFI_CANFD_TXFIFO_ID 0xbc +#define IFI_CANFD_TXFIFO_ID_ID_OFFSET 0 +#define IFI_CANFD_TXFIFO_ID_ID_STD_MASK 0x3ff +#define IFI_CANFD_TXFIFO_ID_ID_XTD_MASK 0x1fffffff +#define IFI_CANFD_TXFIFO_ID_IDE BIT(29) + +#define IFI_CANFD_TXFIFO_DATA 0xc0 /* 0xb0..0xfc */ + +#define IFI_CANFD_FILTER_MASK(n) (0x800 + ((n) * 8) + 0) +#define IFI_CANFD_FILTER_MASK_EXT BIT(29) +#define IFI_CANFD_FILTER_MASK_EDL BIT(30) +#define IFI_CANFD_FILTER_MASK_VALID ((u32)BIT(31)) + +#define IFI_CANFD_FILTER_IDENT(n) (0x800 + ((n) * 8) + 4) +#define IFI_CANFD_FILTER_IDENT_IDE BIT(29) +#define IFI_CANFD_FILTER_IDENT_CANFD BIT(30) +#define IFI_CANFD_FILTER_IDENT_VALID ((u32)BIT(31)) + +/* IFI CANFD private data structure */ +struct ifi_canfd_priv { + struct can_priv can; /* must be the first member */ + struct napi_struct napi; + struct net_device *ndev; + void __iomem *base; +}; + +static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + u32 enirq = 0; + + if (enable) { + enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | + IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; + } + + writel(IFI_CANFD_IRQMASK_SET_ERR | + IFI_CANFD_IRQMASK_SET_TS | + IFI_CANFD_IRQMASK_SET_TX | + IFI_CANFD_IRQMASK_SET_RX | enirq, + priv->base + IFI_CANFD_IRQMASK); +} + +static void ifi_canfd_read_fifo(struct net_device *ndev) +{ + struct net_device_stats *stats = &ndev->stats; + struct ifi_canfd_priv *priv = netdev_priv(ndev); + struct canfd_frame *cf; + struct sk_buff *skb; + const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | + IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER; + u32 rxdlc, rxid; + u32 dlc, id; + int i; + + rxdlc = readl(priv->base + IFI_CANFD_RXFIFO_DLC); + if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) + skb = alloc_canfd_skb(ndev, &cf); + else + skb = alloc_can_skb(ndev, (struct can_frame **)&cf); + + if (!skb) { + stats->rx_dropped++; + return; + } + + dlc = (rxdlc >> IFI_CANFD_RXFIFO_DLC_DLC_OFFSET) & + IFI_CANFD_RXFIFO_DLC_DLC_MASK; + if (rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) + cf->len = can_dlc2len(dlc); + else + cf->len = get_can_dlc(dlc); + + rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID); + id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET); + if (id & IFI_CANFD_RXFIFO_ID_IDE) + id &= IFI_CANFD_RXFIFO_ID_ID_XTD_MASK; + else + id &= IFI_CANFD_RXFIFO_ID_ID_STD_MASK; + cf->can_id = id; + + if (rxdlc & IFI_CANFD_RXFIFO_DLC_ESI) { + cf->flags |= CANFD_ESI; + netdev_dbg(ndev, "ESI Error\n"); + } + + if (!(rxdlc & IFI_CANFD_RXFIFO_DLC_EDL) && + (rxdlc & IFI_CANFD_RXFIFO_DLC_RTR)) { + cf->can_id |= CAN_RTR_FLAG; + } else { + if (rxdlc & IFI_CANFD_RXFIFO_DLC_BRS) + cf->flags |= CANFD_BRS; + + for (i = 0; i < cf->len; i += 4) { + *(u32 *)(cf->data + i) = + readl(priv->base + IFI_CANFD_RXFIFO_DATA + i); + } + } + + /* Remove the packet from FIFO */ + writel(IFI_CANFD_RXSTCMD_REMOVE_MSG, priv->base + IFI_CANFD_RXSTCMD); + writel(rx_irq_mask, priv->base + IFI_CANFD_INTERRUPT); + + 
stats->rx_packets++; + stats->rx_bytes += cf->len; + + netif_receive_skb(skb); +} + +static int ifi_canfd_do_rx_poll(struct net_device *ndev, int quota) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + u32 pkts = 0; + u32 rxst; + + rxst = readl(priv->base + IFI_CANFD_RXSTCMD); + if (rxst & IFI_CANFD_RXSTCMD_EMPTY) { + netdev_dbg(ndev, "No messages in RX FIFO\n"); + return 0; + } + + for (;;) { + if (rxst & IFI_CANFD_RXSTCMD_EMPTY) + break; + if (quota <= 0) + break; + + ifi_canfd_read_fifo(ndev); + quota--; + pkts++; + rxst = readl(priv->base + IFI_CANFD_RXSTCMD); + } + + if (pkts) + can_led_event(ndev, CAN_LED_EVENT_RX); + + return pkts; +} + +static int ifi_canfd_handle_lost_msg(struct net_device *ndev) +{ + struct net_device_stats *stats = &ndev->stats; + struct sk_buff *skb; + struct can_frame *frame; + + netdev_err(ndev, "RX FIFO overflow, message(s) lost.\n"); + + stats->rx_errors++; + stats->rx_over_errors++; + + skb = alloc_can_err_skb(ndev, &frame); + if (unlikely(!skb)) + return 0; + + frame->can_id |= CAN_ERR_CRTL; + frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; + + netif_receive_skb(skb); + + return 1; +} + +static int ifi_canfd_get_berr_counter(const struct net_device *ndev, + struct can_berr_counter *bec) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + u32 err; + + err = readl(priv->base + IFI_CANFD_ERROR); + bec->rxerr = (err >> IFI_CANFD_ERROR_RX_OFFSET) & + IFI_CANFD_ERROR_RX_MASK; + bec->txerr = (err >> IFI_CANFD_ERROR_TX_OFFSET) & + IFI_CANFD_ERROR_TX_MASK; + + return 0; +} + +static int ifi_canfd_handle_state_change(struct net_device *ndev, + enum can_state new_state) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + struct can_frame *cf; + struct sk_buff *skb; + struct can_berr_counter bec; + + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + /* error warning state */ + priv->can.can_stats.error_warning++; + priv->can.state = CAN_STATE_ERROR_WARNING; + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + priv->can.can_stats.error_passive++; + priv->can.state = CAN_STATE_ERROR_PASSIVE; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + priv->can.state = CAN_STATE_BUS_OFF; + ifi_canfd_irq_enable(ndev, 0); + priv->can.can_stats.bus_off++; + can_bus_off(ndev); + break; + default: + break; + } + + /* propagate the error condition to the CAN stack */ + skb = alloc_can_err_skb(ndev, &cf); + if (unlikely(!skb)) + return 0; + + ifi_canfd_get_berr_counter(ndev, &bec); + + switch (new_state) { + case CAN_STATE_ERROR_ACTIVE: + /* error warning state */ + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] = (bec.txerr > bec.rxerr) ? 
+ CAN_ERR_CRTL_TX_WARNING : + CAN_ERR_CRTL_RX_WARNING; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_ERROR_PASSIVE: + /* error passive state */ + cf->can_id |= CAN_ERR_CRTL; + cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; + if (bec.txerr > 127) + cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; + cf->data[6] = bec.txerr; + cf->data[7] = bec.rxerr; + break; + case CAN_STATE_BUS_OFF: + /* bus-off state */ + cf->can_id |= CAN_ERR_BUSOFF; + break; + default: + break; + } + + stats->rx_packets++; + stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + + return 1; +} + +static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + int work_done = 0; + u32 isr; + + /* + * The ErrWarn condition is a little special, since the bit is + * located in the INTERRUPT register instead of STCMD register. + */ + isr = readl(priv->base + IFI_CANFD_INTERRUPT); + if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && + (priv->can.state != CAN_STATE_ERROR_WARNING)) { + /* Clear the interrupt */ + writel(IFI_CANFD_INTERRUPT_ERROR_WARNING, + priv->base + IFI_CANFD_INTERRUPT); + netdev_dbg(ndev, "Error, entered warning state\n"); + work_done += ifi_canfd_handle_state_change(ndev, + CAN_STATE_ERROR_WARNING); + } + + if ((stcmd & IFI_CANFD_STCMD_ERROR_PASSIVE) && + (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { + netdev_dbg(ndev, "Error, entered passive state\n"); + work_done += ifi_canfd_handle_state_change(ndev, + CAN_STATE_ERROR_PASSIVE); + } + + if ((stcmd & IFI_CANFD_STCMD_BUSOFF) && + (priv->can.state != CAN_STATE_BUS_OFF)) { + netdev_dbg(ndev, "Error, entered bus-off state\n"); + work_done += ifi_canfd_handle_state_change(ndev, + CAN_STATE_BUS_OFF); + } + + return work_done; +} + +static int ifi_canfd_poll(struct napi_struct *napi, int quota) +{ + struct net_device *ndev = napi->dev; + struct ifi_canfd_priv *priv = netdev_priv(ndev); + const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE | + IFI_CANFD_STCMD_BUSOFF; + int work_done = 0; + + u32 stcmd = readl(priv->base + IFI_CANFD_STCMD); + u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD); + + /* Handle bus state changes */ + if ((stcmd & stcmd_state_mask) || + ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0)) + work_done += ifi_canfd_handle_state_errors(ndev, stcmd); + + /* Handle lost messages on RX */ + if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) + work_done += ifi_canfd_handle_lost_msg(ndev); + + /* Handle normal messages on RX */ + if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) + work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done); + + if (work_done < quota) { + napi_complete(napi); + ifi_canfd_irq_enable(ndev, 1); + } + + return work_done; +} + +static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) +{ + struct net_device *ndev = (struct net_device *)dev_id; + struct ifi_canfd_priv *priv = netdev_priv(ndev); + struct net_device_stats *stats = &ndev->stats; + const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | + IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER; + const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | + IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; + const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ | + IFI_CANFD_INTERRUPT_ERROR_WARNING); + u32 isr; + + isr = readl(priv->base + IFI_CANFD_INTERRUPT); + + /* No interrupt */ + if (isr == 0) + return IRQ_NONE; + + /* Clear all pending interrupts but ErrWarn */ + writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT); + + /* RX IRQ, start NAPI */ + if (isr & rx_irq_mask) { + ifi_canfd_irq_enable(ndev, 0); 
+ napi_schedule(&priv->napi); + } + + /* TX IRQ */ + if (isr & tx_irq_mask) { + stats->tx_bytes += can_get_echo_skb(ndev, 0); + stats->tx_packets++; + can_led_event(ndev, CAN_LED_EVENT_TX); + netif_wake_queue(ndev); + } + + return IRQ_HANDLED; +} + +static const struct can_bittiming_const ifi_canfd_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 16, + .sjw_max = 16, + .brp_min = 1, + .brp_max = 1024, + .brp_inc = 1, +}; + +static const struct can_bittiming_const ifi_canfd_data_bittiming_const = { + .name = KBUILD_MODNAME, + .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 16, + .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 8, + .sjw_max = 4, + .brp_min = 1, + .brp_max = 32, + .brp_inc = 1, +}; + +static void ifi_canfd_set_bittiming(struct net_device *ndev) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + const struct can_bittiming *bt = &priv->can.bittiming; + const struct can_bittiming *dbt = &priv->can.data_bittiming; + u16 brp, sjw, tseg1, tseg2; + u32 noniso_arg = 0; + u32 time_off; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) { + noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH | + IFI_CANFD_TIME_SET_TIMEA_BOSCH | + IFI_CANFD_TIME_SET_PRESC_BOSCH | + IFI_CANFD_TIME_SET_SJW_BOSCH; + time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH; + } else { + time_off = IFI_CANFD_TIME_SJW_OFF_ISO; + } + + /* Configure bit timing */ + brp = bt->brp - 1; + sjw = bt->sjw - 1; + tseg1 = bt->prop_seg + bt->phase_seg1 - 1; + tseg2 = bt->phase_seg2 - 1; + writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) | + (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) | + (brp << IFI_CANFD_TIME_PRESCALE_OFF) | + (sjw << time_off), + priv->base + IFI_CANFD_TIME); + + /* Configure data bit timing */ + brp = dbt->brp - 1; + sjw = dbt->sjw - 1; + tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; + tseg2 = dbt->phase_seg2 - 1; + writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) | + (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) | + (brp << IFI_CANFD_TIME_PRESCALE_OFF) | + (sjw << time_off) | + noniso_arg, + priv->base + IFI_CANFD_FTIME); +} + +static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id, + const u32 mask, const u32 ident) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + + writel(mask, priv->base + IFI_CANFD_FILTER_MASK(id)); + writel(ident, priv->base + IFI_CANFD_FILTER_IDENT(id)); +} + +static void ifi_canfd_set_filters(struct net_device *ndev) +{ + /* Receive all CAN frames (standard ID) */ + ifi_canfd_set_filter(ndev, 0, + IFI_CANFD_FILTER_MASK_VALID | + IFI_CANFD_FILTER_MASK_EXT, + IFI_CANFD_FILTER_IDENT_VALID); + + /* Receive all CAN frames (extended ID) */ + ifi_canfd_set_filter(ndev, 1, + IFI_CANFD_FILTER_MASK_VALID | + IFI_CANFD_FILTER_MASK_EXT, + IFI_CANFD_FILTER_IDENT_VALID | + IFI_CANFD_FILTER_IDENT_IDE); + + /* Receive all CANFD frames */ + ifi_canfd_set_filter(ndev, 2, + IFI_CANFD_FILTER_MASK_VALID | + IFI_CANFD_FILTER_MASK_EDL | + IFI_CANFD_FILTER_MASK_EXT, + IFI_CANFD_FILTER_IDENT_VALID | + IFI_CANFD_FILTER_IDENT_CANFD | + IFI_CANFD_FILTER_IDENT_IDE); +} + +static void ifi_canfd_start(struct net_device *ndev) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + u32 stcmd; + + /* Reset the IP */ + writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD); + writel(0, priv->base + IFI_CANFD_STCMD); + + ifi_canfd_set_bittiming(ndev); + ifi_canfd_set_filters(ndev); + + /* Reset FIFOs */ + writel(IFI_CANFD_RXSTCMD_RESET, 
priv->base + IFI_CANFD_RXSTCMD); + writel(0, priv->base + IFI_CANFD_RXSTCMD); + writel(IFI_CANFD_TXSTCMD_RESET, priv->base + IFI_CANFD_TXSTCMD); + writel(0, priv->base + IFI_CANFD_TXSTCMD); + + /* Repeat transmission until successful */ + writel(0, priv->base + IFI_CANFD_REPEAT); + writel(0, priv->base + IFI_CANFD_SUSPEND); + + /* Clear all pending interrupts */ + writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ), + priv->base + IFI_CANFD_INTERRUPT); + + stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) + stcmd |= IFI_CANFD_STCMD_BUSMONITOR; + + if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) + stcmd |= IFI_CANFD_STCMD_LOOPBACK; + + if (priv->can.ctrlmode & CAN_CTRLMODE_FD) + stcmd |= IFI_CANFD_STCMD_ENABLE_ISO; + + if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO))) + stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD; + + priv->can.state = CAN_STATE_ERROR_ACTIVE; + + ifi_canfd_irq_enable(ndev, 1); + + /* Enable controller */ + writel(stcmd, priv->base + IFI_CANFD_STCMD); +} + +static void ifi_canfd_stop(struct net_device *ndev) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + + /* Reset the IP */ + writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD); + + /* Mask all interrupts */ + writel(~0, priv->base + IFI_CANFD_IRQMASK); + + /* Clear all pending interrupts */ + writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ), + priv->base + IFI_CANFD_INTERRUPT); + + /* Set the state as STOPPED */ + priv->can.state = CAN_STATE_STOPPED; +} + +static int ifi_canfd_set_mode(struct net_device *ndev, enum can_mode mode) +{ + switch (mode) { + case CAN_MODE_START: + ifi_canfd_start(ndev); + netif_wake_queue(ndev); + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int ifi_canfd_open(struct net_device *ndev) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + int ret; + + ret = open_candev(ndev); + if (ret) { + netdev_err(ndev, "Failed to open CAN device\n"); + return ret; + } + + /* Register interrupt handler */ + ret = request_irq(ndev->irq, ifi_canfd_isr, IRQF_SHARED, + ndev->name, ndev); + if (ret < 0) { + netdev_err(ndev, "Failed to request interrupt\n"); + goto err_irq; + } + + ifi_canfd_start(ndev); + + can_led_event(ndev, CAN_LED_EVENT_OPEN); + napi_enable(&priv->napi); + netif_start_queue(ndev); + + return 0; +err_irq: + close_candev(ndev); + return ret; +} + +static int ifi_canfd_close(struct net_device *ndev) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&priv->napi); + + ifi_canfd_stop(ndev); + + free_irq(ndev->irq, ndev); + + close_candev(ndev); + + can_led_event(ndev, CAN_LED_EVENT_STOP); + + return 0; +} + +static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct ifi_canfd_priv *priv = netdev_priv(ndev); + struct canfd_frame *cf = (struct canfd_frame *)skb->data; + u32 txst, txid; + u32 txdlc = 0; + int i; + + if (can_dropped_invalid_skb(ndev, skb)) + return NETDEV_TX_OK; + + /* Check if the TX buffer is full */ + txst = readl(priv->base + IFI_CANFD_TXSTCMD); + if (txst & IFI_CANFD_TXSTCMD_FULL) { + netif_stop_queue(ndev); + netdev_err(ndev, "BUG! 
TX FIFO full when queue awake!\n"); + return NETDEV_TX_BUSY; + } + + netif_stop_queue(ndev); + + if (cf->can_id & CAN_EFF_FLAG) { + txid = cf->can_id & CAN_EFF_MASK; + txid |= IFI_CANFD_TXFIFO_ID_IDE; + } else { + txid = cf->can_id & CAN_SFF_MASK; + } + + if (priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) { + if (can_is_canfd_skb(skb)) { + txdlc |= IFI_CANFD_TXFIFO_DLC_EDL; + if (cf->flags & CANFD_BRS) + txdlc |= IFI_CANFD_TXFIFO_DLC_BRS; + } + } + + if (cf->can_id & CAN_RTR_FLAG) + txdlc |= IFI_CANFD_TXFIFO_DLC_RTR; + + /* message ram configuration */ + writel(txid, priv->base + IFI_CANFD_TXFIFO_ID); + writel(txdlc, priv->base + IFI_CANFD_TXFIFO_DLC); + + for (i = 0; i < cf->len; i += 4) { + writel(*(u32 *)(cf->data + i), + priv->base + IFI_CANFD_TXFIFO_DATA + i); + } + + writel(0, priv->base + IFI_CANFD_TXFIFO_REPEATCOUNT); + writel(0, priv->base + IFI_CANFD_TXFIFO_SUSPEND_US); + + can_put_echo_skb(skb, ndev, 0); + + /* Start the transmission */ + writel(IFI_CANFD_TXSTCMD_ADD_MSG, priv->base + IFI_CANFD_TXSTCMD); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops ifi_canfd_netdev_ops = { + .ndo_open = ifi_canfd_open, + .ndo_stop = ifi_canfd_close, + .ndo_start_xmit = ifi_canfd_start_xmit, + .ndo_change_mtu = can_change_mtu, +}; + +static int ifi_canfd_plat_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct net_device *ndev; + struct ifi_canfd_priv *priv; + struct resource *res; + void __iomem *addr; + int irq, ret; + u32 id; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + addr = devm_ioremap_resource(dev, res); + irq = platform_get_irq(pdev, 0); + if (IS_ERR(addr) || irq < 0) + return -EINVAL; + + id = readl(addr + IFI_CANFD_IP_ID); + if (id != IFI_CANFD_IP_ID_VALUE) { + dev_err(dev, "This block is not IFI CANFD, id=%08x\n", id); + return -EINVAL; + } + + ndev = alloc_candev(sizeof(*priv), 1); + if (!ndev) + return -ENOMEM; + + ndev->irq = irq; + ndev->flags |= IFF_ECHO; /* we support local echo */ + ndev->netdev_ops = &ifi_canfd_netdev_ops; + + priv = netdev_priv(ndev); + priv->ndev = ndev; + priv->base = addr; + + netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64); + + priv->can.state = CAN_STATE_STOPPED; + + priv->can.clock.freq = readl(addr + IFI_CANFD_SYSCLOCK); + + priv->can.bittiming_const = &ifi_canfd_bittiming_const; + priv->can.data_bittiming_const = &ifi_canfd_data_bittiming_const; + priv->can.do_set_mode = ifi_canfd_set_mode; + priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter; + + /* IFI CANFD can do both Bosch FD and ISO FD */ + priv->can.ctrlmode = CAN_CTRLMODE_FD; + + /* IFI CANFD can do both Bosch FD and ISO FD */ + priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | + CAN_CTRLMODE_LISTENONLY | + CAN_CTRLMODE_FD | + CAN_CTRLMODE_FD_NON_ISO; + + platform_set_drvdata(pdev, ndev); + SET_NETDEV_DEV(ndev, dev); + + ret = register_candev(ndev); + if (ret) { + dev_err(dev, "Failed to register (ret=%d)\n", ret); + goto err_reg; + } + + devm_can_led_init(ndev); + + dev_info(dev, "Driver registered: regs=%p, irq=%d, clock=%d\n", + priv->base, ndev->irq, priv->can.clock.freq); + + return 0; + +err_reg: + free_candev(ndev); + return ret; +} + +static int ifi_canfd_plat_remove(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + + unregister_candev(ndev); + platform_set_drvdata(pdev, NULL); + free_candev(ndev); + + return 0; +} + +static const struct of_device_id ifi_canfd_of_table[] = { + { .compatible = "ifi,canfd-1.0", .data = NULL }, + { /* sentinel 
*/ }, +}; +MODULE_DEVICE_TABLE(of, ifi_canfd_of_table); + +static struct platform_driver ifi_canfd_plat_driver = { + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = ifi_canfd_of_table, + }, + .probe = ifi_canfd_plat_probe, + .remove = ifi_canfd_plat_remove, +}; + +module_platform_driver(ifi_canfd_plat_driver); + +MODULE_AUTHOR("Marek Vasut "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("CAN bus driver for IFI CANFD controller"); -- cgit v1.2.3 From 14f1f724355206dea1cf3f23ee87993bfd47c70c Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 19 Feb 2016 11:26:24 -0800 Subject: GENEVE: Support outer IPv4 Tx checksums by default This change makes it so that if UDP CSUM is not specified we will default to enabling it. The main motivation behind this is the fact that with the use of outer checksum we can greatly improve the performance for GENEVE tunnels on hardware that doesn't know how to parse them. Signed-off-by: Alexander Duyck Acked-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/geneve.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index dfbe3ca687f7..8fa8388cc5d4 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -76,7 +76,7 @@ struct geneve_dev { }; /* Geneve device flags */ -#define GENEVE_F_UDP_CSUM BIT(0) +#define GENEVE_F_UDP_ZERO_CSUM_TX BIT(0) #define GENEVE_F_UDP_ZERO_CSUM6_TX BIT(1) #define GENEVE_F_UDP_ZERO_CSUM6_RX BIT(2) @@ -703,7 +703,7 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb, struct genevehdr *gnvh; int min_headroom; int err; - bool udp_sum = !!(flags & GENEVE_F_UDP_CSUM); + bool udp_sum = !(flags & GENEVE_F_UDP_ZERO_CSUM_TX); skb_scrub_packet(skb, xnet); @@ -944,9 +944,9 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, opts = ip_tunnel_info_opts(info); if (key->tun_flags & TUNNEL_CSUM) - flags |= GENEVE_F_UDP_CSUM; + flags &= ~GENEVE_F_UDP_ZERO_CSUM_TX; else - flags &= ~GENEVE_F_UDP_CSUM; + flags |= GENEVE_F_UDP_ZERO_CSUM_TX; err = geneve_build_skb(rt, skb, key->tun_flags, vni, info->options_len, opts, flags, xnet); @@ -972,7 +972,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, fl4.saddr, fl4.daddr, tos, ttl, df, sport, geneve->dst_port, !net_eq(geneve->net, dev_net(geneve->dev)), - !(flags & GENEVE_F_UDP_CSUM)); + !!(flags & GENEVE_F_UDP_ZERO_CSUM_TX)); return NETDEV_TX_OK; @@ -1383,8 +1383,8 @@ static int geneve_newlink(struct net *net, struct net_device *dev, metadata = true; if (data[IFLA_GENEVE_UDP_CSUM] && - nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) - flags |= GENEVE_F_UDP_CSUM; + !nla_get_u8(data[IFLA_GENEVE_UDP_CSUM])) + flags |= GENEVE_F_UDP_ZERO_CSUM_TX; if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX] && nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) @@ -1454,7 +1454,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) } if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM, - !!(geneve->flags & GENEVE_F_UDP_CSUM)) || + !(geneve->flags & GENEVE_F_UDP_ZERO_CSUM_TX)) || nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, !!(geneve->flags & GENEVE_F_UDP_ZERO_CSUM6_TX)) || nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, -- cgit v1.2.3 From 6ceb31ca5f65acff299dbc3da5854d54e147b7d8 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Fri, 19 Feb 2016 11:26:31 -0800 Subject: VXLAN: Support outer IPv4 Tx checksums by default This change makes it so that if UDP CSUM is not 
specified we will default to enabling it. The main motivation behind this is the fact that with the use of outer checksum we can greatly improve the performance for VXLAN tunnels on devices that don't know how to parse tunnel headers. Signed-off-by: Alexander Duyck Acked-by: Tom Herbert Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 19 +++++++++---------- include/net/vxlan.h | 2 +- 2 files changed, 10 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index c963897e713d..2ddc642fb64f 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1957,13 +1957,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; sk = vxlan->vn4_sock->sock->sk; - if (info) { - if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) - df = htons(IP_DF); - } else { - udp_sum = !!(flags & VXLAN_F_UDP_CSUM); - } - rt = vxlan_get_route(vxlan, skb, rdst ? rdst->remote_ifindex : 0, tos, dst->sin.sin_addr.s_addr, &saddr, @@ -1997,6 +1990,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } + if (!info) + udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM_TX); + else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + df = htons(IP_DF); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr), @@ -2898,8 +2896,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (data[IFLA_VXLAN_PORT]) conf.dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); - if (data[IFLA_VXLAN_UDP_CSUM] && nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) - conf.flags |= VXLAN_F_UDP_CSUM; + if (data[IFLA_VXLAN_UDP_CSUM] && + !nla_get_u8(data[IFLA_VXLAN_UDP_CSUM])) + conf.flags |= VXLAN_F_UDP_ZERO_CSUM_TX; if (data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX] && nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_TX])) @@ -3043,7 +3042,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->cfg.addrmax) || nla_put_be16(skb, IFLA_VXLAN_PORT, vxlan->cfg.dst_port) || nla_put_u8(skb, IFLA_VXLAN_UDP_CSUM, - !!(vxlan->flags & VXLAN_F_UDP_CSUM)) || + !(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) || nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_TX, !!(vxlan->flags & VXLAN_F_UDP_ZERO_CSUM6_TX)) || nla_put_u8(skb, IFLA_VXLAN_UDP_ZERO_CSUM6_RX, diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 748083de367a..6eda4ed4d78b 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -197,7 +197,7 @@ struct vxlan_dev { #define VXLAN_F_L2MISS 0x08 #define VXLAN_F_L3MISS 0x10 #define VXLAN_F_IPV6 0x20 -#define VXLAN_F_UDP_CSUM 0x40 +#define VXLAN_F_UDP_ZERO_CSUM_TX 0x40 #define VXLAN_F_UDP_ZERO_CSUM6_TX 0x80 #define VXLAN_F_UDP_ZERO_CSUM6_RX 0x100 #define VXLAN_F_REMCSUM_TX 0x200 -- cgit v1.2.3 From c3aaa06d5a63609641b7ad62ee0956f3de86c1cd Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Sat, 20 Feb 2016 19:31:32 -0800 Subject: ipvlan: scrub skb before routing in L3 mode. Scrub skb before hitting the iptable hooks to ensure packets hit these hooks. Set the xnet param only when the packet is crossing the ns boundry so if the IPvlan slave and master belong to the same ns, the param will be set to false. Signed-off-by: Mahesh Bandewar CC: Cong Wang Signed-off-by: David S. 
Miller --- drivers/net/ipvlan/ipvlan_core.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8c48bb2a94ea..4e60c6bbdb6e 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -342,7 +342,7 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, return addr; } -static int ipvlan_process_v4_outbound(struct sk_buff *skb) +static int ipvlan_process_v4_outbound(struct sk_buff *skb, bool xnet) { const struct iphdr *ip4h = ip_hdr(skb); struct net_device *dev = skb->dev; @@ -365,7 +365,7 @@ static int ipvlan_process_v4_outbound(struct sk_buff *skb) ip_rt_put(rt); goto err; } - skb_dst_drop(skb); + skb_scrub_packet(skb, xnet); skb_dst_set(skb, &rt->dst); err = ip_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) @@ -380,7 +380,7 @@ out: return ret; } -static int ipvlan_process_v6_outbound(struct sk_buff *skb) +static int ipvlan_process_v6_outbound(struct sk_buff *skb, bool xnet) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); struct net_device *dev = skb->dev; @@ -403,7 +403,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb) dst_release(dst); goto err; } - skb_dst_drop(skb); + skb_scrub_packet(skb, xnet); skb_dst_set(skb, dst); err = ip6_local_out(net, skb->sk, skb); if (unlikely(net_xmit_eval(err))) @@ -418,8 +418,7 @@ out: return ret; } -static int ipvlan_process_outbound(struct sk_buff *skb, - const struct ipvl_dev *ipvlan) +static int ipvlan_process_outbound(struct sk_buff *skb, bool xnet) { struct ethhdr *ethh = eth_hdr(skb); int ret = NET_XMIT_DROP; @@ -443,9 +442,9 @@ static int ipvlan_process_outbound(struct sk_buff *skb, } if (skb->protocol == htons(ETH_P_IPV6)) - ret = ipvlan_process_v6_outbound(skb); + ret = ipvlan_process_v6_outbound(skb, xnet); else if (skb->protocol == htons(ETH_P_IP)) - ret = ipvlan_process_v4_outbound(skb); + ret = ipvlan_process_v4_outbound(skb, xnet); else { pr_warn_ratelimited("Dropped outbound packet type=%x\n", ntohs(skb->protocol)); @@ -481,6 +480,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) void *lyr3h; struct ipvl_addr *addr; int addr_type; + bool xnet; lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); if (!lyr3h) @@ -491,8 +491,9 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) return ipvlan_rcv_frame(addr, &skb, true); out: + xnet = !net_eq(dev_net(skb->dev), dev_net(ipvlan->phy_dev)); skb->dev = ipvlan->phy_dev; - return ipvlan_process_outbound(skb, xnet); + return ipvlan_process_outbound(skb, xnet); } static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) -- cgit v1.2.3 From e93fbc5a15ff25d4f9fd92a13c33cd37d99a2340 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Sat, 20 Feb 2016 19:31:36 -0800 Subject: ipvlan: mode is u16 The mode argument was erroneously defined as u32 but it has always been u16. Also use the ipvlan_set_port_mode() helper to set the mode instead of assigning directly. This should avoid future erroneous assignments / updates. Signed-off-by: Mahesh Bandewar Signed-off-by: David S.
Miller --- drivers/net/ipvlan/ipvlan.h | 1 - drivers/net/ipvlan/ipvlan_main.c | 9 ++++++--- 2 files changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 9542b7bac61a..817cab1a7959 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -115,7 +115,6 @@ static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) } void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev); -void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval); void ipvlan_init_secret(void); unsigned int ipvlan_mac_hash(const unsigned char *addr); rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 7a3b41468a55..5bcb852c5500 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -14,7 +14,7 @@ void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj; } -void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval) +static void ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) { struct ipvl_dev *ipvlan; @@ -442,6 +442,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, struct ipvl_port *port; struct net_device *phy_dev; int err; + u16 mode = IPVLAN_MODE_L3; if (!tb[IFLA_LINK]) return -EINVAL; @@ -460,10 +461,10 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, return err; } - port = ipvlan_port_get_rtnl(phy_dev); if (data && data[IFLA_IPVLAN_MODE]) - port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); + port = ipvlan_port_get_rtnl(phy_dev); ipvlan->phy_dev = phy_dev; ipvlan->dev = dev; ipvlan->port = port; @@ -489,6 +490,8 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, goto ipvlan_destroy_port; list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans); + ipvlan_set_port_mode(port, mode); + netif_stacked_transfer_operstate(phy_dev, dev); return 0; -- cgit v1.2.3 From ab5b7013db3cc637a8f19e00d71310e40db75bf6 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Sat, 20 Feb 2016 19:31:41 -0800 Subject: ipvlan: misc changes 1. scope correction for few functions that are used in single file. 2. Adjust variables that are used in fast-path to fit into single cacheline 3. Update rcv_frame() to skip shared check for frames coming over wire Signed-off-by: Mahesh Bandewar Signed-off-by: David S. 
Miller --- drivers/net/ipvlan/ipvlan.h | 9 +++------ drivers/net/ipvlan/ipvlan_core.c | 27 ++++++++++++++++----------- drivers/net/ipvlan/ipvlan_main.c | 2 +- 3 files changed, 20 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 817cab1a7959..695a5dc9ace3 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -84,19 +84,19 @@ struct ipvl_addr { #define ip4addr ipu.ip4 struct hlist_node hlnode; /* Hash-table linkage */ struct list_head anode; /* logical-interface linkage */ - struct rcu_head rcu; ipvl_hdr_type atype; + struct rcu_head rcu; }; struct ipvl_port { struct net_device *dev; struct hlist_head hlhead[IPVLAN_HASH_SIZE]; struct list_head ipvlans; - struct rcu_head rcu; + u16 mode; struct work_struct wq; struct sk_buff_head backlog; int count; - u16 mode; + struct rcu_head rcu; }; static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) @@ -114,7 +114,6 @@ static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) return rtnl_dereference(d->rx_handler_data); } -void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev); void ipvlan_init_secret(void); unsigned int ipvlan_mac_hash(const unsigned char *addr); rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb); @@ -124,7 +123,5 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr); struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, const void *iaddr, bool is_v6); bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); -struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, - const void *iaddr, bool is_v6); void ipvlan_ht_addr_del(struct ipvl_addr *addr); #endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 4e60c6bbdb6e..d6d0524ee5fd 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -53,8 +53,8 @@ static u8 ipvlan_get_v4_hash(const void *iaddr) IPVLAN_HASH_MASK; } -struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, - const void *iaddr, bool is_v6) +static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, + const void *iaddr, bool is_v6) { struct ipvl_addr *addr; u8 hash; @@ -265,20 +265,25 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, struct sk_buff *skb = *pskb; len = skb->len + ETH_HLEN; - if (unlikely(!(dev->flags & IFF_UP))) { - kfree_skb(skb); - goto out; - } + /* Only packets exchanged between two local slaves need to have + * device-up check as well as skb-share check. 
+ */ + if (local) { + if (unlikely(!(dev->flags & IFF_UP))) { + kfree_skb(skb); + goto out; + } - skb = skb_share_check(skb, GFP_ATOMIC); - if (!skb) - goto out; + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto out; - *pskb = skb; + *pskb = skb; + } skb->dev = dev; - skb->pkt_type = PACKET_HOST; if (local) { + skb->pkt_type = PACKET_HOST; if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 5bcb852c5500..a7ca1c519a0d 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -9,7 +9,7 @@ #include "ipvlan.h" -void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) +static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev) { ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj; } -- cgit v1.2.3 From 60fffb3b80e8669636ab4bc067869b7516f47147 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 21 Feb 2016 11:40:07 +0200 Subject: qed: Turn most GFP_ATOMIC into GFP_KERNEL Initial driver submission used GFP_ATOMIC almost inclusively when allocating memory. We now remedy this point, using GFP_KERNEL where it's possible. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_dev.c | 6 +++--- drivers/net/ethernet/qlogic/qed/qed_hw.c | 2 +- drivers/net/ethernet/qlogic/qed/qed_int.c | 8 ++++---- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 6 +++--- drivers/net/ethernet/qlogic/qed/qed_spq.c | 6 +++--- 6 files changed, 15 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c index d3f7a0215e7e..fc767c07a264 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c @@ -448,7 +448,7 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn) struct qed_cxt_mngr *p_mngr; u32 i; - p_mngr = kzalloc(sizeof(*p_mngr), GFP_ATOMIC); + p_mngr = kzalloc(sizeof(*p_mngr), GFP_KERNEL); if (!p_mngr) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_cxt_mngr'\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index bc17ed2c9cac..acfe7be49a58 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -134,17 +134,17 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn) /* PQs will be arranged as follows: First per-TC PQ then pure-LB quete. 
*/ qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) * - num_pqs, GFP_ATOMIC); + num_pqs, GFP_KERNEL); if (!qm_info->qm_pq_params) goto alloc_err; qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) * - num_vports, GFP_ATOMIC); + num_vports, GFP_KERNEL); if (!qm_info->qm_vport_params) goto alloc_err; qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) * - MAX_NUM_PORTS, GFP_ATOMIC); + MAX_NUM_PORTS, GFP_KERNEL); if (!qm_info->qm_port_params) goto alloc_err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index ffa99273b353..a95a3e4b3101 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -44,7 +44,7 @@ struct qed_ptt_pool { int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn) { struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), - GFP_ATOMIC); + GFP_KERNEL); int i; if (!p_pool) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 9cc9d62c1fec..90ba1d71e67a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -399,7 +399,7 @@ static int qed_int_sb_attn_alloc(struct qed_hwfn *p_hwfn, dma_addr_t p_phys = 0; /* SB struct */ - p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); if (!p_sb) { DP_NOTICE(cdev, "Failed to allocate `struct qed_sb_attn_info'\n"); return -ENOMEM; @@ -666,7 +666,7 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, void *p_virt; /* SB struct */ - p_sb = kmalloc(sizeof(*p_sb), GFP_ATOMIC); + p_sb = kmalloc(sizeof(*p_sb), GFP_KERNEL); if (!p_sb) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sb_info'\n"); return -ENOMEM; @@ -946,7 +946,7 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, u16 sb_id; u16 prev_sb_id = 0xFF; - p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_ATOMIC); + p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL); if (!p_hwfn->hw_info.p_igu_info) return -ENOMEM; @@ -1072,7 +1072,7 @@ static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn) static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn) { - p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_ATOMIC); + p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL); if (!p_hwfn->sp_dpc) return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 1457e30faccf..f23ce734ab63 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -147,7 +147,7 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, u32 size; /* Allocate mcp_info structure */ - p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC); + p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL); if (!p_hwfn->mcp_info) goto err; p_info = p_hwfn->mcp_info; @@ -161,10 +161,10 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, } size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); - p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC); + p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL); p_info->mfw_mb_shadow = kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS( - p_info->mfw_mb_length), GFP_ATOMIC); + p_info->mfw_mb_length), GFP_KERNEL); if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 3dd548ab8df1..f6c6c21601d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ 
b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -327,7 +327,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq; /* Allocate EQ struct */ - p_eq = kzalloc(sizeof(*p_eq), GFP_ATOMIC); + p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL); if (!p_eq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n"); return NULL; @@ -457,7 +457,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn) /* SPQ struct */ p_spq = - kzalloc(sizeof(struct qed_spq), GFP_ATOMIC); + kzalloc(sizeof(struct qed_spq), GFP_KERNEL); if (!p_spq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n"); return -ENOMEM; @@ -853,7 +853,7 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn) struct qed_consq *p_consq; /* Allocate ConsQ struct */ - p_consq = kzalloc(sizeof(*p_consq), GFP_ATOMIC); + p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL); if (!p_consq) { DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n"); return NULL; -- cgit v1.2.3 From 0a0c5d3b81529de2c1b65eb6582a4b6d0da1ada9 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 21 Feb 2016 11:40:08 +0200 Subject: qed: Handle possible race in SB config Due to HW design, some of the memories are wide-bus and access to those needs to be sequentialized on a per-HW-block level; Read/write to a given HW-block might break other read/write to wide-bus memory done at ~same time. Status blocks initialization in CAU is done into such a wide-bus memory. This moves the initialization into using DMAE which is guaranteed to be safe to use on such memories. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_int.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 90ba1d71e67a..fa73daa94655 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -473,20 +473,20 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid) { struct cau_sb_entry sb_entry; - u32 val; qed_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, vf_number, vf_valid); if (p_hwfn->hw_init_done) { - val = CAU_REG_SB_ADDR_MEMORY + igu_sb_id * sizeof(u64); - qed_wr(p_hwfn, p_ptt, val, lower_32_bits(sb_phys)); - qed_wr(p_hwfn, p_ptt, val + sizeof(u32), - upper_32_bits(sb_phys)); - - val = CAU_REG_SB_VAR_MEMORY + igu_sb_id * sizeof(u64); - qed_wr(p_hwfn, p_ptt, val, sb_entry.data); - qed_wr(p_hwfn, p_ptt, val + sizeof(u32), sb_entry.params); + /* Wide-bus, initialize via DMAE */ + u64 phys_addr = (u64)sb_phys; + + qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&phys_addr, + CAU_REG_SB_ADDR_MEMORY + + igu_sb_id * sizeof(u64), 2, 0); + qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_sb_id * sizeof(u64), 2, 0); } else { /* Initialize Status Block Address */ STORE_RT_REG_AGG(p_hwfn, -- cgit v1.2.3 From 06f56b8136b75a04ee6e142a9e345cfd4b357de5 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 21 Feb 2016 11:40:09 +0200 Subject: qed: Change metadata needed for SPQ entries Each configuration element send via ramrod requires a Slow Path Queue entry. This slightly changes the way such an entry is configured, but contains mostly semantic changes [where more parameters are gathered in a sub-struct instead of being directly passed]. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 139 ++++++++++------------ drivers/net/ethernet/qlogic/qed/qed_sp.h | 11 +- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 52 ++++---- 3 files changed, 95 insertions(+), 107 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 73feaf7eedb8..8d1bc7e7e996 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -142,9 +142,9 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, u8 drop_ttl0_flg, u8 inner_vlan_removal_en_flg) { - struct qed_sp_init_request_params params; struct vport_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; u16 rx_mode = 0; u8 abs_vport_id = 0; @@ -153,16 +153,14 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, if (rc != 0) return rc; - memset(¶ms, 0, sizeof(params)); - params.ramrod_data_size = sizeof(*p_ramrod); - params.comp_mode = QED_SPQ_MODE_EBLOCK; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - opaque_fid, ETH_RAMROD_VPORT_START, - PROTOCOLID_ETH, - ¶ms); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -362,7 +360,7 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, { struct qed_rss_params *p_rss_params = p_params->rss_params; struct vport_update_ramrod_data_cmn *p_cmn; - struct qed_sp_init_request_params sp_params; + struct qed_sp_init_data init_data; struct vport_update_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; u8 abs_vport_id = 0; @@ -372,17 +370,15 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn, if (rc != 0) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = comp_mode; - sp_params.p_comp_data = p_comp_data; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - p_params->opaque_fid, ETH_RAMROD_VPORT_UPDATE, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -416,8 +412,8 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id) { - struct qed_sp_init_request_params sp_params; struct vport_stop_ramrod_data *p_ramrod; + struct qed_sp_init_data init_data; struct qed_spq_entry *p_ent; u8 abs_vport_id = 0; int rc; @@ -426,16 +422,14 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, if (rc != 0) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - opaque_fid, ETH_RAMROD_VPORT_STOP, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -514,8 +508,8 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size) { struct rx_queue_start_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent 
= NULL; + struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_rx_cid; u16 abs_rx_q_id = 0; u8 abs_vport_id = 0; @@ -540,15 +534,15 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, opaque_fid, cid, params->queue_id, params->vport_id, params->sb); - memset(&sp_params, 0, sizeof(params)); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; - sp_params.ramrod_data_size = sizeof(*p_ramrod); + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = cid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - cid, opaque_fid, ETH_RAMROD_RX_QUEUE_START, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -640,21 +634,20 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, { struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; struct rx_queue_stop_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; u16 abs_rx_q_id = 0; int rc = -EINVAL; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_rx_cid->cid; + init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - p_rx_cid->cid, - p_rx_cid->opaque_fid, ETH_RAMROD_RX_QUEUE_STOP, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -692,8 +685,8 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, union qed_qm_pq_params *p_pq_params) { struct tx_queue_start_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; struct qed_hw_cid_data *p_tx_cid; u8 abs_vport_id; int rc = -EINVAL; @@ -708,15 +701,15 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, if (rc) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = cid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - rc = qed_sp_init_request(p_hwfn, &p_ent, cid, - opaque_fid, + rc = qed_sp_init_request(p_hwfn, &p_ent, ETH_RAMROD_TX_QUEUE_START, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -796,20 +789,19 @@ static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id) { struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(struct tx_queue_stop_ramrod_data); - sp_params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = p_tx_cid->cid; + init_data.opaque_fid = p_tx_cid->opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, - p_tx_cid->cid, - p_tx_cid->opaque_fid, ETH_RAMROD_TX_QUEUE_STOP, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -866,9 +858,9 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, { u8 vport_to_add_to = 0, 
vport_to_remove_from = 0; struct vport_filter_update_ramrod_data *p_ramrod; - struct qed_sp_init_request_params sp_params; struct eth_filter_cmd *p_first_filter; struct eth_filter_cmd *p_second_filter; + struct qed_sp_init_data init_data; enum eth_filter_action action; int rc; @@ -882,17 +874,16 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn, if (rc) return rc; - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(**pp_ramrod); - sp_params.comp_mode = comp_mode; - sp_params.p_comp_data = p_comp_data; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, pp_ent, - qed_spq_get_cid(p_hwfn), - opaque_fid, ETH_RAMROD_FILTERS_UPDATE, - PROTOCOLID_ETH, - &sp_params); + PROTOCOLID_ETH, &init_data); if (rc) return rc; @@ -1116,8 +1107,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, { unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; struct vport_update_ramrod_data *p_ramrod = NULL; - struct qed_sp_init_request_params sp_params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; u8 abs_vport_id = 0; int rc, i; @@ -1133,18 +1124,16 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn, return rc; } - memset(&sp_params, 0, sizeof(sp_params)); - sp_params.ramrod_data_size = sizeof(*p_ramrod); - sp_params.comp_mode = comp_mode; - sp_params.p_comp_data = p_comp_data; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; rc = qed_sp_init_request(p_hwfn, &p_ent, - qed_spq_get_cid(p_hwfn), - p_hwfn->hw_info.opaque_fid, ETH_RAMROD_VPORT_UPDATE, - PROTOCOLID_ETH, - &sp_params); - + PROTOCOLID_ETH, &init_data); if (rc) { DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); return rc; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 8a83609c443c..d39f914b66ee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -311,19 +311,20 @@ void qed_consq_free(struct qed_hwfn *p_hwfn, #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 -struct qed_sp_init_request_params { - size_t ramrod_data_size; +struct qed_sp_init_data { + u32 cid; + u16 opaque_fid; + + /* Information regarding operation upon sending & completion */ enum spq_mode comp_mode; struct qed_spq_comp_cb *p_comp_data; }; int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, - u32 cid, - u16 opaque_fid, u8 cmd, u8 protocol, - struct qed_sp_init_request_params *p_params); + struct qed_sp_init_data *p_data); /** * @brief qed_sp_pf_start - PF Function Start Ramrod diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 33090f63548c..e271ef95745c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -23,15 +23,13 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent, - u32 cid, - u16 opaque_fid, u8 cmd, u8 protocol, - struct qed_sp_init_request_params *p_params) + struct qed_sp_init_data *p_data) { - int rc = -EINVAL; + u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; struct qed_spq_entry *p_ent = NULL; - u32 opaque_cid 
= opaque_fid << 16 | cid; + int rc; if (!pp_ent) return -ENOMEM; @@ -48,7 +46,7 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, p_ent->elem.hdr.protocol_id = protocol; p_ent->priority = QED_SPQ_PRIORITY_NORMAL; - p_ent->comp_mode = p_params->comp_mode; + p_ent->comp_mode = p_data->comp_mode; p_ent->comp_done.done = 0; switch (p_ent->comp_mode) { @@ -57,17 +55,17 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, break; case QED_SPQ_MODE_BLOCK: - if (!p_params->p_comp_data) + if (!p_data->p_comp_data) return -EINVAL; - p_ent->comp_cb.cookie = p_params->p_comp_data->cookie; + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; break; case QED_SPQ_MODE_CB: - if (!p_params->p_comp_data) + if (!p_data->p_comp_data) p_ent->comp_cb.function = NULL; else - p_ent->comp_cb = *p_params->p_comp_data; + p_ent->comp_cb = *p_data->p_comp_data; break; default: @@ -83,8 +81,8 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK, QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", "MODE_CB")); - if (p_params->ramrod_data_size) - memset(&p_ent->ramrod, 0, p_params->ramrod_data_size); + + memset(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); return 0; } @@ -92,28 +90,26 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, int qed_sp_pf_start(struct qed_hwfn *p_hwfn, enum qed_mf_mode mode) { - struct qed_sp_init_request_params params; struct pf_start_ramrod_data *p_ramrod = NULL; u16 sb = qed_int_get_sp_sb_id(p_hwfn); u8 sb_index = p_hwfn->p_eq->eq_sb_index; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; /* update initial eq producer */ qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(&p_hwfn->p_eq->chain)); - memset(¶ms, 0, sizeof(params)); - params.ramrod_data_size = sizeof(*p_ramrod); - params.comp_mode = QED_SPQ_MODE_EBLOCK; + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - rc = qed_sp_init_request(p_hwfn, - &p_ent, - qed_spq_get_cid(p_hwfn), - p_hwfn->hw_info.opaque_fid, + rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_START, PROTOCOLID_COMMON, - ¶ms); + &init_data); if (rc) return rc; @@ -163,17 +159,19 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, int qed_sp_pf_stop(struct qed_hwfn *p_hwfn) { - struct qed_sp_init_request_params params; struct qed_spq_entry *p_ent = NULL; + struct qed_sp_init_data init_data; int rc = -EINVAL; - memset(¶ms, 0, sizeof(params)); - params.comp_mode = QED_SPQ_MODE_EBLOCK; + /* Get SPQ entry */ + memset(&init_data, 0, sizeof(init_data)); + init_data.cid = qed_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = QED_SPQ_MODE_EBLOCK; - rc = qed_sp_init_request(p_hwfn, &p_ent, qed_spq_get_cid(p_hwfn), - p_hwfn->hw_info.opaque_fid, + rc = qed_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, - ¶ms); + &init_data); if (rc) return rc; -- cgit v1.2.3 From 944945986f125bdbbeaa78dac0c0eadb963eb34a Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 21 Feb 2016 11:40:10 +0200 Subject: qed: Introduce DMA_REGPAIR_LE FW hsi contains regpairs, mostly for 64-bit address representations. Since same paradigm is applied each time a regpair is filled, this introduces a new utility macro for setting such regpairs. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_l2.c | 9 +++------ drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 12 ++++-------- drivers/net/ethernet/qlogic/qed/qed_spq.c | 9 +++------ include/linux/qed/qed_chain.h | 4 ++++ 4 files changed, 14 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 8d1bc7e7e996..bba59c51f72c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -557,12 +557,10 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->complete_event_flg = 1; p_ramrod->bd_max_bytes = cpu_to_le16(bd_max_bytes); - p_ramrod->bd_base.hi = DMA_HI_LE(bd_chain_phys_addr); - p_ramrod->bd_base.lo = DMA_LO_LE(bd_chain_phys_addr); + DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size); - p_ramrod->cqe_pbl_addr.hi = DMA_HI_LE(cqe_pbl_addr); - p_ramrod->cqe_pbl_addr.lo = DMA_LO_LE(cqe_pbl_addr); + DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); rc = qed_spq_post(p_hwfn, p_ent, NULL); @@ -721,8 +719,7 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, p_ramrod->stats_counter_id = stats_id; p_ramrod->pbl_size = cpu_to_le16(pbl_size); - p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); - p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr); + DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr); pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_ETH, diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index e271ef95745c..1c06c37d4c3d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c @@ -136,16 +136,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; /* Place EQ address in RAMROD */ - p_ramrod->event_ring_pbl_addr.hi = - DMA_HI_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); - p_ramrod->event_ring_pbl_addr.lo = - DMA_LO_LE(p_hwfn->p_eq->chain.pbl.p_phys_table); + DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, + p_hwfn->p_eq->chain.pbl.p_phys_table); p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt; - p_ramrod->consolid_q_pbl_addr.hi = - DMA_HI_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); - p_ramrod->consolid_q_pbl_addr.lo = - DMA_LO_LE(p_hwfn->p_consq->chain.pbl.p_phys_table); + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + p_hwfn->p_consq->chain.pbl.p_phys_table); p_hwfn->hw_info.personality = PERSONALITY_ETH; diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index f6c6c21601d7..89469d5aae25 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -183,10 +183,8 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn, p_cxt->xstorm_st_context.spq_base_hi = DMA_HI_LE(p_spq->chain.p_phys_addr); - p_cxt->xstorm_st_context.consolid_base_addr.lo = - DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr); - p_cxt->xstorm_st_context.consolid_base_addr.hi = - DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr); + DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr, + p_hwfn->p_consq->chain.p_phys_addr); } static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, @@ -423,8 +421,7 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) p_virt = p_spq->p_virt; for (i = 0; i < p_spq->chain.capacity; i++) { - p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys); - p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys); + 
DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys); list_add_tail(&p_virt->list, &p_spq->free_pool); diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 41b9049b57e2..5f8fcaaa6504 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -19,6 +19,10 @@ /* dma_addr_t manip */ #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) +#define DMA_REGPAIR_LE(x, val) do { \ + (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)); \ + } while (0) #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) #define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) -- cgit v1.2.3 From d4ee52897b5b5cf9137545f6fd41dc7217565517 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 21 Feb 2016 11:40:11 +0200 Subject: qed,qede: Bump driver versions to 8.7.0.0 Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 2 +- drivers/net/ethernet/qlogic/qede/qede.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d34da638b5d5..66b021e3c1be 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,7 +26,7 @@ #include "qed_hsi.h" extern const struct qed_common_ops qed_common_ops_pass; -#define DRV_MODULE_VERSION "8.4.0.0" +#define DRV_MODULE_VERSION "8.7.0.0" #define MAX_HWFNS_PER_DEVICE (4) #define NAME_SIZE 16 diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 15c5528b4f39..02e17d331f22 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -24,7 +24,7 @@ #include #define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 4 +#define QEDE_MINOR_VERSION 7 #define QEDE_REVISION_VERSION 0 #define QEDE_ENGINEERING_VERSION 0 #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ -- cgit v1.2.3 From 0c71de6634c5c1b6c9dcb80049680ad334205c23 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Mon, 22 Feb 2016 22:55:43 +0530 Subject: netcp: use pointer to fix build fail While building keystone_defconfig of arm we are getting build failure with the error: drivers/net/ethernet/ti/netcp_core.c:1846:31: error: invalid type argument of '->' (have 'struct tc_to_netdev') if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) ^ drivers/net/ethernet/ti/netcp_core.c:1851:35: error: invalid type argument of '->' (have 'struct tc_to_netdev') (dev->real_num_tx_queues < tc->tc)) ^ drivers/net/ethernet/ti/netcp_core.c:1855:8: error: invalid type argument of '->' (have 'struct tc_to_netdev') if (tc->tc) { ^ drivers/net/ethernet/ti/netcp_core.c:1856:28: error: invalid type argument of '->' (have 'struct tc_to_netdev') netdev_set_num_tc(dev, tc->tc); ^ drivers/net/ethernet/ti/netcp_core.c:1857:21: error: invalid type argument of '->' (have 'struct tc_to_netdev') for (i = 0; i < tc->tc; i++) ^ drivers/net/ethernet/ti/netcp_core.c: At top level: drivers/net/ethernet/ti/netcp_core.c:1879:2: warning: initialization from incompatible pointer type .ndo_setup_tc = netcp_setup_tc, ^ The callback of ndo_setup_tc should be: int (*ndo_setup_tc)(struct net_device *dev, u32 handle, __be16 protocol, struct tc_to_netdev *tc); But we missed marking the last argument as a pointer. Fixes: 16e5cc647173 ("net: rework setup_tc ndo op to consume general tc operand") CC: John Fastabend Signed-off-by: Sudip Mukherjee Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/netcp_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 8586a2034019..06a0a73f1dcc 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1836,7 +1836,7 @@ static u16 netcp_select_queue(struct net_device *dev, struct sk_buff *skb, } static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, - struct tc_to_netdev tc) + struct tc_to_netdev *tc) { int i; -- cgit v1.2.3 From a30a9ea6e21b495372aff549f3dfd63198bd1f45 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 23 Feb 2016 12:37:52 +0300 Subject: rocker: fix rocker_world_port_obj_vlan_add() We were changing return values and accidentally made rocker_world_port_obj_vlan_add() into a no-op. Fixes: fccd84d44912 ('rocker: return -EOPNOTSUPP for undefined world ops') Signed-off-by: Dan Carpenter Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker_main.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index acafbf870182..28b775e5a9ad 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1598,7 +1598,6 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, if (!wops->port_obj_vlan_add) return -EOPNOTSUPP; - return 0; return wops->port_obj_vlan_add(rocker_port, vlan, trans); } -- cgit v1.2.3 From d2ee9c2ec659cfab715568d2d9837f7ff67402fa Mon Sep 17 00:00:00 2001 From: Bhumika Goyal Date: Tue, 12 Jan 2016 22:10:15 +0530 Subject: Bluetooth: ath3k: Fixed a blank line after declaration issue Fixed a coding style issue. Added a blank link after declaration. Signed-off-by: Bhumika Goyal Signed-off-by: Marcel Holtmann --- drivers/bluetooth/ath3k.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index fa893c3ec408..ebd641b85396 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -497,6 +497,7 @@ static int ath3k_probe(struct usb_interface *intf, /* match device ID in ath3k blacklist table */ if (!id->driver_info) { const struct usb_device_id *match; + match = usb_match_id(intf, ath3k_blist_tbl); if (match) id = match; -- cgit v1.2.3 From 2be1149ed40812ab75bc1af67a68f5d09e8be762 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Wed, 10 Feb 2016 12:22:54 -0500 Subject: Bluetooth: hci_intel: Fix a wrong comparison A return value of the intel_wait_booting() function compared with a constant ETIMEDOUT instead of -ETIMEDOUT. 
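Helpers such as intel_wait_booting() follow the usual kernel convention of reporting failure as a negative errno value, so checking against the positive ETIMEDOUT constant can never match and a timeout ends up treated as a fatal error instead of being tolerated. A minimal stand-alone sketch of the broken and corrected comparisons, using a hypothetical wait_for_boot() stand-in rather than the driver's actual code:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in for a helper that, like intel_wait_booting(),
 * reports a timeout as a negative errno value. */
static int wait_for_boot(void)
{
	return -ETIMEDOUT;
}

int main(void)
{
	int err = wait_for_boot();

	/* Wrong: err is -ETIMEDOUT (negative), so it never equals the
	 * positive constant and every timeout is treated as fatal. */
	if (err && err != ETIMEDOUT)
		printf("buggy check bails out, err=%d\n", err);

	/* Right: compare against -ETIMEDOUT so a timeout is tolerated. */
	if (err && err != -ETIMEDOUT)
		printf("fixed check bails out, err=%d\n", err);
	else
		printf("fixed check continues despite the timeout\n");

	return 0;
}
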
Signed-off-by: Anton Protopopov Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_intel.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c index 3d63ea37bd4c..91d605147b10 100644 --- a/drivers/bluetooth/hci_intel.c +++ b/drivers/bluetooth/hci_intel.c @@ -488,7 +488,7 @@ static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed) clear_bit(STATE_BOOTING, &intel->flags); /* In case of timeout, try to continue anyway */ - if (err && err != ETIMEDOUT) + if (err && err != -ETIMEDOUT) return err; bt_dev_info(hdev, "Change controller speed to %d", speed); @@ -581,7 +581,7 @@ static int intel_setup(struct hci_uart *hu) clear_bit(STATE_BOOTING, &intel->flags); /* In case of timeout, try to continue anyway */ - if (err && err != ETIMEDOUT) + if (err && err != -ETIMEDOUT) return err; set_bit(STATE_BOOTLOADER, &intel->flags); -- cgit v1.2.3 From 609574eb46335cfac1421a07c0505627cbbab1f0 Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Wed, 10 Feb 2016 15:33:17 +0300 Subject: Bluetooth: btusb: Add new AR3012 ID 13d3:3395 T: Bus=03 Lev=02 Prnt=02 Port=00 Cnt=01 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=13d3 ProdID=3395 Rev=00.01 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb BugLink: https://bugs.launchpad.net/bugs/1542564 Reported-and-tested-by: Christopher Simerly Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org --- drivers/bluetooth/ath3k.c | 2 ++ drivers/bluetooth/btusb.c | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index ebd641b85396..3532a779c659 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -113,6 +113,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3393) }, + { USB_DEVICE(0x13d3, 0x3395) }, { USB_DEVICE(0x13d3, 0x3402) }, { USB_DEVICE(0x13d3, 0x3408) }, { USB_DEVICE(0x13d3, 0x3423) }, @@ -175,6 +176,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index a191e318fab8..ab6328c2d0d0 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -227,6 +227,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, -- cgit v1.2.3 From b013a636b8571fa62b0fd3abeb2b36f0fc7199dd Mon Sep 17 00:00:00 2001 From: "J.J. 
Meijer" Date: Mon, 1 Feb 2016 23:47:55 +0100 Subject: Bluetooth: hci_bcm: Add new ACPI ID for bcm43241 This ACPI ID is used at least by HP for their Omni 10 5600eg tablet. Signed-off-by: J.J. Meijer Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 5f3de181e744..7092dab79379 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -820,6 +820,7 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E3D", 0 }, { "BCM2E3F", 0 }, { "BCM2E40", 0 }, + { "BCM2E54", 0 }, { "BCM2E64", 0 }, { "BCM2E65", 0 }, { "BCM2E67", 0 }, -- cgit v1.2.3 From 2791b44d6b4a2280568e0dc84be51992b7b367f1 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Fri, 29 Jan 2016 16:53:27 +0200 Subject: Bluetooth: hci_bcm: Add BCM2E7C ACPI ID Recent macbooks (early 2015) with BCM43241 use this ACPI ID. Add it to the list of supported devices. Reported-by: Leif Liddy Signed-off-by: Mika Westerberg Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 7092dab79379..bb4c5a00aea0 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -825,6 +825,7 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E65", 0 }, { "BCM2E67", 0 }, { "BCM2E7B", 0 }, + { "BCM2E7C", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); -- cgit v1.2.3 From ad750fa142747f3f42b66ae051a0f275bd2035df Mon Sep 17 00:00:00 2001 From: Petri Gynther Date: Wed, 10 Feb 2016 12:00:46 -0800 Subject: Bluetooth: btbcm: Fix handling of firmware not found If the call to request_firmware() fails in btbcm_setup_patchram(), the BCM chip will be operating with its default firmware. In this case, btbcm_setup_patchram() should not return immediately but instead should skip to btbcm_check_bdaddr() and quirk setup. Signed-off-by: Petri Gynther Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btbcm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 0b697946e9bc..fdb44829ab6f 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -467,7 +467,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev) err = request_firmware(&fw, fw_name, &hdev->dev); if (err < 0) { BT_INFO("%s: BCM: Patch %s not found", hdev->name, fw_name); - return 0; + goto done; } btbcm_patchram(hdev, fw); @@ -501,6 +501,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev) BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); kfree_skb(skb); +done: btbcm_check_bdaddr(hdev); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); -- cgit v1.2.3 From 28c971d82fb58ef7cba22e5308be6d2d2590473d Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Wed, 10 Feb 2016 00:49:11 +0300 Subject: Bluetooth: Add new AR3012 ID 0489:e095 T: Bus=01 Lev=01 Prnt=01 Port=04 Cnt=02 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=0489 ProdID=e095 Rev=00.01 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb This device requires ar3k/AthrBT_0x31010100.dfu and ar3k/ramps_0x31010100_40.dfu firmware files that are not in linux-firmware yet. 
BugLink: https://bugs.launchpad.net/bugs/1542944 Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org --- drivers/bluetooth/ath3k.c | 2 ++ drivers/bluetooth/btusb.c | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 3532a779c659..e2ccf906d691 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0489, 0xe05f) }, { USB_DEVICE(0x0489, 0xe076) }, { USB_DEVICE(0x0489, 0xe078) }, + { USB_DEVICE(0x0489, 0xe095) }, { USB_DEVICE(0x04c5, 0x1330) }, { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, @@ -145,6 +146,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index ab6328c2d0d0..55fbdfc688e3 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -196,6 +196,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, -- cgit v1.2.3 From c231c5a47a0c697e7bc821af0b5cb28d129fe8e0 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 19 Feb 2016 09:59:12 +0100 Subject: at86rf230: fix race on error handling The resource "ctx" can be still used by at86rf230_async_state_change, we need to free it at the complete handler of the async state change to avoid a use after free. 
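The fix below follows the usual defer-free pattern: a context handed to an asynchronous operation is released only from that operation's completion callback, never by the code that started it. Roughly (a hedged sketch with made-up names, not the driver code itself):

#include <linux/slab.h>

struct async_ctx {
	bool free;	/* true if this context was dynamically allocated */
};

/* Hypothetical asynchronous helper that later invokes @complete(ctx). */
void start_async_state_change(struct async_ctx *ctx, void (*complete)(void *context));

static void op_complete(void *context)
{
	struct async_ctx *ctx = context;

	if (ctx->free)
		kfree(ctx);	/* safe: the asynchronous user is finished with ctx */
}

static void op_start(struct async_ctx *ctx)
{
	start_async_state_change(ctx, op_complete);
	/* no kfree(ctx) here - the state change may still dereference it */
}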
Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 0fbbba7a0cae..bf3cfe44b84f 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -343,16 +343,26 @@ static const struct regmap_config at86rf230_regmap_spi_config = { }; static void -at86rf230_async_error_recover(void *context) +at86rf230_async_error_recover_complete(void *context) { struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; - at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL); - ieee802154_wake_queue(lp->hw); if (ctx->free) kfree(ctx); + + ieee802154_wake_queue(lp->hw); +} + +static void +at86rf230_async_error_recover(void *context) +{ + struct at86rf230_state_change *ctx = context; + struct at86rf230_local *lp = ctx->lp; + + lp->is_tx = 0; + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, + at86rf230_async_error_recover_complete); } static inline void -- cgit v1.2.3 From d981b5b5fe8ed0c237b2925ab37a17d28405494f Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 19 Feb 2016 09:59:13 +0100 Subject: at86rf230: fix state change handling on error This patch forces "is_tx_from_off" to always be set when no calibration timeout occurred. During error handling, is_tx_from_off could otherwise be left in an invalid state. Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/at86rf230.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index bf3cfe44b84f..cb9e9fe6d77a 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -902,14 +902,12 @@ at86rf230_xmit_start(void *context) struct at86rf230_local *lp = ctx->lp; /* check if we change from off state */ - if (lp->is_tx_from_off) { - lp->is_tx_from_off = false; + if (lp->is_tx_from_off) at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, at86rf230_write_frame); - } else { + else at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_xmit_tx_on); - } } static int @@ -933,6 +931,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb) at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, at86rf230_xmit_start); } else { + lp->is_tx_from_off = false; at86rf230_xmit_start(ctx); } -- cgit v1.2.3 From 6367551f462a3c1e2f38f5ea7335e49443379ab2 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 19 Feb 2016 09:59:14 +0100 Subject: mrf24j40: add missing writeable reg This patch adds a missing register to the regmap writeable-register list.
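For context, regmap consults the driver's writeable_reg callback before allowing a register write, so a register missing from that switch statement is refused. A minimal sketch of the mechanism (placeholder register numbers, not the mrf24j40 ones):

#include <linux/regmap.h>

static bool example_reg_writeable(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case 0x0c:	/* placeholder writeable registers */
	case 0x34:
		return true;
	default:
		return false;	/* writes to anything else are rejected by regmap */
	}
}

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.writeable_reg = example_reg_writeable,
};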
Cc: Alan Ott Signed-off-by: Alexander Aring Signed-off-by: Marcel Holtmann --- drivers/net/ieee802154/mrf24j40.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c index 4cdf51638972..764a2bddfaee 100644 --- a/drivers/net/ieee802154/mrf24j40.c +++ b/drivers/net/ieee802154/mrf24j40.c @@ -310,6 +310,7 @@ mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg) case REG_TRISGPIO: case REG_GPIO: case REG_RFCTL: + case REG_SECCR2: case REG_SLPACK: case REG_BBREG0: case REG_BBREG1: -- cgit v1.2.3 From d715fa6431a794e6a8cdb53d87acd3d03ed8a941 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 12 Feb 2016 12:09:38 -0500 Subject: net: dsa: mv88e6xxx: add port private structure Add a per-port mv88e6xxx_priv_port structure to store per-port related data, instead of adding several arrays of DSA_MAX_PORTS elements in the mv88e6xxx_priv_state structure. It currently only contains the port STP state. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 4 ++-- drivers/net/dsa/mv88e6xxx.h | 7 ++++++- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 512c8c0be1b4..b0e00edb302e 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1131,7 +1131,7 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) /* mv88e6xxx_port_stp_update may be called with softirqs disabled, * so we can not update the port state directly but need to schedule it. */ - ps->port_state[port] = stp_state; + ps->ports[port].state = stp_state; set_bit(port, &ps->port_state_update_mask); schedule_work(&ps->bridge_work); @@ -1925,7 +1925,7 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) while (ps->port_state_update_mask) { port = __ffs(ps->port_state_update_mask); clear_bit(port, &ps->port_state_update_mask); - mv88e6xxx_set_port_state(ds, port, ps->port_state[port]); + mv88e6xxx_set_port_state(ds, port, ps->ports[port].state); } } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index ca08f913d302..63a6f587e9e8 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -379,6 +379,10 @@ struct mv88e6xxx_vtu_stu_entry { u8 data[DSA_MAX_PORTS]; }; +struct mv88e6xxx_priv_port { + u8 state; +}; + struct mv88e6xxx_priv_state { /* When using multi-chip addressing, this mutex protects * access to the indirect access registers. (In single-chip @@ -415,8 +419,9 @@ struct mv88e6xxx_priv_state { int id; /* switch product id */ int num_ports; /* number of switch ports */ + struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; + unsigned long port_state_update_mask; - u8 port_state[DSA_MAX_PORTS]; struct work_struct bridge_work; }; -- cgit v1.2.3 From a6692754d61a6b3735803783f394880805675f99 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 12 Feb 2016 12:09:39 -0500 Subject: net: dsa: pass bridge down to drivers Some DSA drivers may or may not support multiple software bridges on top of an hardware switch. It is more convenient for them to access the bridge's net_device for finer configuration. Removing the need to craft and access a bitmask also simplifies the code. This patch changes the signature of bridge related functions, update DSA drivers, and removes dsa_slave_br_port_mask. Signed-off-by: Vivien Didelot Tested-by: Florian Fainelli Signed-off-by: David S. 
Miller --- Documentation/networking/dsa/dsa.txt | 7 ++----- drivers/net/dsa/bcm_sf2.c | 12 +++++++----- drivers/net/dsa/bcm_sf2.h | 2 ++ drivers/net/dsa/mv88e6xxx.c | 13 +++++++++++-- drivers/net/dsa/mv88e6xxx.h | 6 ++++-- include/net/dsa.h | 5 ++--- net/dsa/slave.c | 31 ++----------------------------- 7 files changed, 30 insertions(+), 46 deletions(-) (limited to 'drivers') diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index aa9c1f9313cd..ebf21530471f 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -524,17 +524,14 @@ Bridge layer - port_join_bridge: bridge layer function invoked when a given switch port is added to a bridge, this function should be doing the necessary at the switch level to permit the joining port from being added to the relevant logical - domain for it to ingress/egress traffic with other members of the bridge. DSA - does nothing but calculate a bitmask of switch ports currently members of the - specified bridge being requested the join + domain for it to ingress/egress traffic with other members of the bridge. - port_leave_bridge: bridge layer function invoked when a given switch port is removed from a bridge, this function should be doing the necessary at the switch level to deny the leaving port from ingress/egress traffic from the remaining bridge members. When the port leaves the bridge, it should be aged out at the switch hardware for the switch to (re) learn MAC addresses behind - this port. DSA calculates the bitmask of ports still members of the bridge - being left + this port. - port_stp_update: bridge layer function invoked when a given switch port STP state is computed by the bridge layer and should be propagated to switch diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 6f946fedbb77..3f627598f277 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -483,16 +483,17 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) } static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, - u32 br_port_mask) + struct net_device *bridge) { struct bcm_sf2_priv *priv = ds_to_priv(ds); unsigned int i; u32 reg, p_ctl; + priv->port_sts[port].bridge_dev = bridge; p_ctl = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(port)); for (i = 0; i < priv->hw_params.num_ports; i++) { - if (!((1 << i) & br_port_mask)) + if (priv->port_sts[i].bridge_dev != bridge) continue; /* Add this local port to the remote port VLAN control @@ -515,10 +516,10 @@ static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, - u32 br_port_mask) +static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); + struct net_device *bridge = priv->port_sts[port].bridge_dev; unsigned int i; u32 reg, p_ctl; @@ -526,7 +527,7 @@ static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, for (i = 0; i < priv->hw_params.num_ports; i++) { /* Don't touch the remaining ports */ - if (!((1 << i) & br_port_mask)) + if (priv->port_sts[i].bridge_dev != bridge) continue; reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i)); @@ -541,6 +542,7 @@ static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port, core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; + priv->port_sts[port].bridge_dev = NULL; return 0; } diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index 
6bba1c98d764..200b1f5fdb56 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -50,6 +50,8 @@ struct bcm_sf2_port_status { struct ethtool_eee eee; u32 vlan_ctl_mask; + + struct net_device *bridge_dev; }; struct bcm_sf2_arl_entry { diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index b0e00edb302e..2e515e8a95fe 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1889,13 +1889,22 @@ unlock: return err; } -int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members) +int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + ps->ports[port].bridge_dev = bridge; + return 0; } -int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members) +int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + ps->ports[port].bridge_dev = NULL; + return 0; } diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 63a6f587e9e8..260b4918e427 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -380,6 +380,7 @@ struct mv88e6xxx_vtu_stu_entry { }; struct mv88e6xxx_priv_port { + struct net_device *bridge_dev; u8 state; }; @@ -481,8 +482,9 @@ int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum, int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e); int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e); -int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members); -int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members); +int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *bridge); +int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port); int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, diff --git a/include/net/dsa.h b/include/net/dsa.h index 26a0e86e611e..1c845d7bf0b2 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -297,9 +297,8 @@ struct dsa_switch_driver { * Bridge integration */ int (*port_join_bridge)(struct dsa_switch *ds, int port, - u32 br_port_mask); - int (*port_leave_bridge)(struct dsa_switch *ds, int port, - u32 br_port_mask); + struct net_device *bridge); + int (*port_leave_bridge)(struct dsa_switch *ds, int port); int (*port_stp_update)(struct dsa_switch *ds, int port, u8 state); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index ab24521beb4d..ab515df5f493 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -385,31 +385,6 @@ static int dsa_slave_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -/* Return a bitmask of all ports being currently bridged within a given bridge - * device. Note that on leave, the mask will still return the bitmask of ports - * currently bridged, prior to port removal, and this is exactly what we want. 
- */ -static u32 dsa_slave_br_port_mask(struct dsa_switch *ds, - struct net_device *bridge) -{ - struct dsa_slave_priv *p; - unsigned int port; - u32 mask = 0; - - for (port = 0; port < DSA_MAX_PORTS; port++) { - if (!dsa_is_port_initialized(ds, port)) - continue; - - p = netdev_priv(ds->ports[port]); - - if (ds->ports[port]->priv_flags & IFF_BRIDGE_PORT && - p->bridge_dev == bridge) - mask |= 1 << port; - } - - return mask; -} - static int dsa_slave_stp_update(struct net_device *dev, u8 state) { struct dsa_slave_priv *p = netdev_priv(dev); @@ -533,8 +508,7 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, p->bridge_dev = br; if (ds->drv->port_join_bridge) - ret = ds->drv->port_join_bridge(ds, p->port, - dsa_slave_br_port_mask(ds, br)); + ret = ds->drv->port_join_bridge(ds, p->port, br); return ret; } @@ -547,8 +521,7 @@ static int dsa_slave_bridge_port_leave(struct net_device *dev) if (ds->drv->port_leave_bridge) - ret = ds->drv->port_leave_bridge(ds, p->port, - dsa_slave_br_port_mask(ds, p->bridge_dev)); + ret = ds->drv->port_leave_bridge(ds, p->port); p->bridge_dev = NULL; -- cgit v1.2.3 From da9c359e19f0a72a386a0e83a098b6dae21aa3c3 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 12 Feb 2016 12:09:40 -0500 Subject: net: dsa: mv88e6xxx: check hardware VLAN in use The DSA drivers now have access to the VLAN prepare phase and the bridge net_device. It is easier to check for overlapping bridges from within the driver. Thus add such check in mv88e6xxx_port_vlan_prepare. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 64 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 2e515e8a95fe..685dcb047979 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1471,14 +1471,78 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, return 0; } +static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, + u16 vid_begin, u16 vid_end) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry vlan; + int i, err; + + if (!vid_begin) + return -EOPNOTSUPP; + + mutex_lock(&ps->smi_mutex); + + err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(ds, &vlan); + if (err) + goto unlock; + + if (!vlan.valid) + break; + + if (vlan.vid > vid_end) + break; + + for (i = 0; i < ps->num_ports; ++i) { + if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i)) + continue; + + if (vlan.data[i] == + GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + continue; + + if (ps->ports[i].bridge_dev == + ps->ports[port].bridge_dev) + break; /* same bridge, check next VLAN */ + + netdev_warn(ds->ports[port], + "hardware VLAN %d already used by %s\n", + vlan.vid, + netdev_name(ps->ports[i].bridge_dev)); + err = -EOPNOTSUPP; + goto unlock; + } + } while (vlan.vid < vid_end); + +unlock: + mutex_unlock(&ps->smi_mutex); + + return err; +} + int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) { + int err; + /* We reserve a few VLANs to isolate unbridged ports */ if (vlan->vid_end >= 4000) return -EOPNOTSUPP; + /* If the requested port doesn't belong to the same bridge as the VLAN + * members, do not support it (yet) and fallback to software VLAN. 
+ */ + err = mv88e6xxx_port_check_hw_vlan(ds, port, vlan->vid_begin, + vlan->vid_end); + if (err) + return err; + /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. */ -- cgit v1.2.3 From f06b7ab875037c3d99cd30a07c51caf34fbecb2c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 13 Jan 2016 14:21:01 +0100 Subject: mac80211_hwsim: remove shadowing variable The function here already has a variable hdr that even contains the right thing, so the inner scope's hdr variable that's shadowing the outer one can just be removed. Signed-off-by: Johannes Berg --- drivers/net/wireless/mac80211_hwsim.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index e31a94fd6135..ee37af1066d2 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1333,10 +1333,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, data->tx_bytes += skb->len; ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); - if (ack && skb->len >= 16) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + if (ack && skb->len >= 16) mac80211_hwsim_monitor_ack(channel, hdr->addr2); - } ieee80211_tx_info_clear_status(txi); -- cgit v1.2.3 From 34d505193bd10668acf1caba02d2f66bddc23fea Mon Sep 17 00:00:00 2001 From: Lior David Date: Thu, 28 Jan 2016 10:58:25 +0200 Subject: cfg80211: basic support for PBSS network type PBSS (Personal Basic Service Set) is a new BSS type for DMG networks. It is similar to infrastructure BSS, having an AP-like entity called PCP (PBSS Control Point), but it has few differences. PBSS support is mandatory for 11ad devices. Add support for PBSS by introducing a new PBSS flag attribute. The PBSS flag is used in the START_AP command to request starting a PCP instead of an AP, and in the CONNECT command to request connecting to a PCP instead of an AP. 
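A driver that does not yet implement PCP operation can simply refuse the new flag, which is what the wil6210 hunks below do. Roughly (a hedged sketch, not the actual driver code):

#include <net/cfg80211.h>

static int example_start_ap(struct wiphy *wiphy, struct net_device *ndev,
			    struct cfg80211_ap_settings *info)
{
	if (info->pbss)
		return -EOPNOTSUPP;	/* PCP (PBSS) operation not supported yet */

	/* ... normal AP bring-up ... */
	return 0;
}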
Signed-off-by: Lior David Signed-off-by: Johannes Berg --- drivers/net/wireless/ath/wil6210/cfg80211.c | 10 ++++++++++ include/net/cfg80211.h | 8 ++++++++ include/uapi/linux/nl80211.h | 6 ++++++ net/wireless/nl80211.c | 11 +++++++++++ net/wireless/sme.c | 9 ++++++--- 5 files changed, 41 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index 20d07ef679e8..1f231cd08138 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -422,6 +422,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy, if (sme->privacy && !rsn_eid) wil_info(wil, "WSC connection\n"); + if (sme->pbss) { + wil_err(wil, "connect - PBSS not yet supported\n"); + return -EOPNOTSUPP; + } + bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); @@ -870,6 +875,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy, return -EINVAL; } + if (info->pbss) { + wil_err(wil, "AP: PBSS not yet supported\n"); + return -EOPNOTSUPP; + } + switch (info->hidden_ssid) { case NL80211_HIDDEN_SSID_NOT_IN_USE: hidden_ssid = WMI_HIDDEN_SSID_DISABLED; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 9bcaaf7cd15a..9e1b24c29f0c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -712,6 +712,8 @@ struct cfg80211_acl_data { * @p2p_opp_ps: P2P opportunistic PS * @acl: ACL configuration used by the drivers which has support for * MAC address based access control + * @pbss: If set, start as a PCP instead of AP. Relevant for DMG + * networks. */ struct cfg80211_ap_settings { struct cfg80211_chan_def chandef; @@ -730,6 +732,7 @@ struct cfg80211_ap_settings { u8 p2p_ctwindow; bool p2p_opp_ps; const struct cfg80211_acl_data *acl; + bool pbss; }; /** @@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params { * @ht_capa_mask: The bits of ht_capa which are to be used. * @vht_capa: VHT Capability overrides * @vht_capa_mask: The bits of vht_capa which are to be used. + * @pbss: if set, connect to a PCP instead of AP. Valid for DMG + * networks. */ struct cfg80211_connect_params { struct ieee80211_channel *channel; @@ -1910,6 +1915,7 @@ struct cfg80211_connect_params { struct ieee80211_ht_cap ht_capa_mask; struct ieee80211_vht_cap vht_capa; struct ieee80211_vht_cap vht_capa_mask; + bool pbss; }; /** @@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys; * registered for unexpected class 3 frames (AP mode) * @conn: (private) cfg80211 software SME connection state machine data * @connect_keys: (private) keys to set after connection is established + * @conn_bss_type: connecting/connected BSS type * @ibss_fixed: (private) IBSS is using fixed BSSID * @ibss_dfs_possible: (private) IBSS may change to a DFS channel * @event_list: (private) list for internal event processing @@ -3519,6 +3526,7 @@ struct wireless_dev { u8 ssid_len, mesh_id_len, mesh_id_up_len; struct cfg80211_conn *conn; struct cfg80211_cached_keys *connect_keys; + enum ieee80211_bss_type conn_bss_type; struct list_head event_list; spinlock_t event_lock; diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index 5b7b5ebe7ca8..7758969a2a8e 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -1789,6 +1789,10 @@ enum nl80211_commands { * thus it must not specify the number of iterations, only the interval * between scans. The scan plans are executed sequentially. 
* Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan. + * @NL80211_ATTR_PBSS: flag attribute. If set it means operate + * in a PBSS. Specified in %NL80211_CMD_CONNECT to request + * connecting to a PCP, and in %NL80211_CMD_START_AP to start + * a PCP instead of AP. Relevant for DMG networks only. * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined @@ -2164,6 +2168,8 @@ enum nl80211_attrs { NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS, NL80211_ATTR_SCHED_SCAN_PLANS, + NL80211_ATTR_PBSS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d4786f2802aa..268cb493f6a5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -401,6 +401,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 }, [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 }, [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG }, + [NL80211_ATTR_PBSS] = { .type = NLA_FLAG }, }; /* policy for the key attributes */ @@ -3461,6 +3462,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(params.acl); } + params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); + if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) + return -EOPNOTSUPP; + wdev_lock(wdev); err = rdev_start_ap(rdev, dev, ¶ms); if (!err) { @@ -7980,6 +7985,12 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) connect.flags |= ASSOC_REQ_USE_RRM; } + connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]); + if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) { + kzfree(connkeys); + return -EOPNOTSUPP; + } + wdev_lock(dev->ieee80211_ptr); err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL); wdev_unlock(dev->ieee80211_ptr); diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 8020b5b094d4..79bd3a171caa 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -264,7 +264,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev) wdev->conn->params.bssid, wdev->conn->params.ssid, wdev->conn->params.ssid_len, - IEEE80211_BSS_TYPE_ESS, + wdev->conn_bss_type, IEEE80211_PRIVACY(wdev->conn->params.privacy)); if (!bss) return NULL; @@ -687,7 +687,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect); bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid, wdev->ssid, wdev->ssid_len, - IEEE80211_BSS_TYPE_ESS, + wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); if (bss) cfg80211_hold_bss(bss_from_pub(bss)); @@ -846,7 +846,7 @@ void cfg80211_roamed(struct net_device *dev, bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid, wdev->ssid_len, - IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY); + wdev->conn_bss_type, IEEE80211_PRIVACY_ANY); if (WARN_ON(!bss)) return; @@ -1017,6 +1017,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, memcpy(wdev->ssid, connect->ssid, connect->ssid_len); wdev->ssid_len = connect->ssid_len; + wdev->conn_bss_type = connect->pbss ? 
IEEE80211_BSS_TYPE_PBSS : + IEEE80211_BSS_TYPE_ESS; + if (!rdev->ops->connect) err = cfg80211_sme_connect(wdev, connect, prev_bssid); else -- cgit v1.2.3 From 1ad4f639cc3e09d6c8402a0fcc592e2391683e31 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Sun, 14 Feb 2016 13:56:36 +0200 Subject: iwlwifi: mvm: move TX PN assignment for TKIP to the driver If protocol offloading is configured, the fw might generate some frames (e.g. arp response) on its own during d3/d0i3. On d3/d0i3 exit the driver queries the updated PN (if relevant), and updates its keys (for the d0i3 case, this is done by iwl_mvm_d0i3_exit_work(), which is scheduled on d0i3 exit) While in d0i3, iwlmvm defers tx frames until d0i3 exit, and then continues their processing. This is problematic with TKIP, since the frame's PN has already been set at this stage (in contrast to CCMP, where the PN is being set only later on), so both the frame's PN and the upcoming PN update (from d0i3 exit work) might be wrong. Fix it by moving the TX PN assignment (for TKIP) to the driver, similarly to CCMP. Signed-off-by: Eliad Peller Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 15 ++++++++++----- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 2 ++ 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index d3e21d95cece..93e495861214 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -249,16 +249,19 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw, return; case WLAN_CIPHER_SUITE_TKIP: if (sta) { + u64 pn64; + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; rx_p1ks = data->tkip->rx_uni; - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + pn64 = atomic64_read(&key->tx_pn); + tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); + tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); - ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); + ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64), + p1k); iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k); memcpy(data->tkip->mic_keys.tx, @@ -1601,7 +1604,9 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw, case WLAN_CIPHER_SUITE_TKIP: iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq); iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key); - ieee80211_set_key_tx_seq(key, &seq); + atomic64_set(&key->tx_pn, + (u64)seq.tkip.iv16 | + ((u64)seq.tkip.iv32 << 16)); break; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1bd3f0b700d3..2b532925781a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2585,7 +2585,7 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; - key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; + key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; break; case WLAN_CIPHER_SUITE_CCMP: key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8bf48a7d0f4e..ca1e485a6adc 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -299,6 +299,8 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, case WLAN_CIPHER_SUITE_TKIP: tx_cmd->sec_ctl = TX_CMD_SEC_TKIP; + pn = atomic64_inc_return(&keyconf->tx_pn); + ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn); ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key); break; -- cgit v1.2.3 From ca48ebbc7ea7e82e3ae4b55aacead0cdb54ff008 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Mon, 15 Feb 2016 12:34:10 +0200 Subject: mac80211: remove ieee80211_get_key_tx_seq/ieee80211_set_key_tx_seq Since the PNs of all the tx keys are now tracked in the public part of the key struct (with atomic counter), we no longer need these functions. dvm and vt665{5,6} are currently the only users of these functions, so update them accordingly. Signed-off-by: Eliad Peller Signed-off-by: Emmanuel Grumbach Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 20 +++---- drivers/staging/vt6655/rxtx.c | 12 ++-- drivers/staging/vt6656/rxtx.c | 12 ++-- include/net/mac80211.h | 34 ----------- net/mac80211/key.c | 87 ---------------------------- 5 files changed, 24 insertions(+), 141 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c index 4841be2aa499..1799469268ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c @@ -943,14 +943,16 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (sta) { + u64 pn64; + tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc; tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc; rx_p1ks = data->tkip->rx_uni; - ieee80211_get_key_tx_seq(key, &seq); - tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16); - tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32); + pn64 = atomic64_read(&key->tx_pn); + tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64)); + tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64)); ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k); iwlagn_convert_p1k(p1k, data->tkip->tx.p1k); @@ -996,19 +998,13 @@ static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw, break; case WLAN_CIPHER_SUITE_CCMP: if (sta) { - u8 *pn = seq.ccmp.pn; + u64 pn64; aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc; aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc; - ieee80211_get_key_tx_seq(key, &seq); - aes_tx_sc->pn = cpu_to_le64( - (u64)pn[5] | - ((u64)pn[4] << 8) | - ((u64)pn[3] << 16) | - ((u64)pn[2] << 24) | - ((u64)pn[1] << 32) | - ((u64)pn[0] << 40)); + pn64 = atomic64_read(&key->tx_pn); + aes_tx_sc->pn = cpu_to_le64(pn64); } else aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc; diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index b668db6a45fb..1a2dda09b69d 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -1210,7 +1210,7 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer, struct sk_buff *skb, u16 payload_len, struct vnt_mic_hdr *mic_hdr) { - struct ieee80211_key_seq seq; + u64 pn64; u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb)); /* strip header and icv len from payload */ @@ -1243,9 +1243,13 @@ static void vnt_fill_txkey(struct ieee80211_hdr *hdr, u8 *key_buffer, mic_hdr->payload_len = cpu_to_be16(payload_len); ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2); - ieee80211_get_key_tx_seq(tx_key, &seq); - - memcpy(mic_hdr->ccmp_pn, 
seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); + pn64 = atomic64_read(&tx_key->tx_pn); + mic_hdr->ccmp_pn[5] = pn64; + mic_hdr->ccmp_pn[4] = pn64 >> 8; + mic_hdr->ccmp_pn[3] = pn64 >> 16; + mic_hdr->ccmp_pn[2] = pn64 >> 24; + mic_hdr->ccmp_pn[1] = pn64 >> 32; + mic_hdr->ccmp_pn[0] = pn64 >> 40; if (ieee80211_has_a4(hdr->frame_control)) mic_hdr->hlen = cpu_to_be16(28); diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c index efb54f53b4f9..76378d225b46 100644 --- a/drivers/staging/vt6656/rxtx.c +++ b/drivers/staging/vt6656/rxtx.c @@ -719,7 +719,7 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context, u16 payload_len, struct vnt_mic_hdr *mic_hdr) { struct ieee80211_hdr *hdr = tx_context->hdr; - struct ieee80211_key_seq seq; + u64 pn64; u8 *iv = ((u8 *)hdr + ieee80211_get_hdrlen_from_skb(skb)); /* strip header and icv len from payload */ @@ -752,9 +752,13 @@ static void vnt_fill_txkey(struct vnt_usb_send_context *tx_context, mic_hdr->payload_len = cpu_to_be16(payload_len); ether_addr_copy(mic_hdr->mic_addr2, hdr->addr2); - ieee80211_get_key_tx_seq(tx_key, &seq); - - memcpy(mic_hdr->ccmp_pn, seq.ccmp.pn, IEEE80211_CCMP_PN_LEN); + pn64 = atomic64_read(&tx_key->tx_pn); + mic_hdr->ccmp_pn[5] = pn64; + mic_hdr->ccmp_pn[4] = pn64 >> 8; + mic_hdr->ccmp_pn[3] = pn64 >> 16; + mic_hdr->ccmp_pn[2] = pn64 >> 24; + mic_hdr->ccmp_pn[1] = pn64 >> 32; + mic_hdr->ccmp_pn[0] = pn64 >> 40; if (ieee80211_has_a4(hdr->frame_control)) mic_hdr->hlen = cpu_to_be16(28); diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 15879b49baad..66155d3ad7e6 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -4463,23 +4463,6 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf, */ u8 *ieee80211_tkip_add_iv(u8 *pos, struct ieee80211_key_conf *keyconf, u64 pn); -/** - * ieee80211_get_key_tx_seq - get key TX sequence counter - * - * @keyconf: the parameter passed with the set key - * @seq: buffer to receive the sequence data - * - * This function allows a driver to retrieve the current TX IV/PN - * for the given key. It must not be called if IV generation is - * offloaded to the device. - * - * Note that this function may only be called when no TX processing - * can be done concurrently, for example when queues are stopped - * and the stop has been synchronized. - */ -void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq); - /** * ieee80211_get_key_rx_seq - get key RX sequence counter * @@ -4499,23 +4482,6 @@ void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq); -/** - * ieee80211_set_key_tx_seq - set key TX sequence counter - * - * @keyconf: the parameter passed with the set key - * @seq: new sequence data - * - * This function allows a driver to set the current TX IV/PNs for the - * given key. This is useful when resuming from WoWLAN sleep and the - * device may have transmitted frames using the PTK, e.g. replies to - * ARP requests. - * - * Note that this function may only be called when no TX processing - * can be done concurrently. 
- */ -void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq); - /** * ieee80211_set_key_rx_seq - set key RX sequence counter * diff --git a/net/mac80211/key.c b/net/mac80211/key.c index f9c4cb9c6e06..3df7b0392d30 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -932,51 +932,6 @@ void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, } EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_notify); -void ieee80211_get_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq) -{ - struct ieee80211_key *key; - u64 pn64; - - if (WARN_ON(!(keyconf->flags & IEEE80211_KEY_FLAG_GENERATE_IV))) - return; - - key = container_of(keyconf, struct ieee80211_key, conf); - - switch (key->conf.cipher) { - case WLAN_CIPHER_SUITE_TKIP: - pn64 = atomic64_read(&key->conf.tx_pn); - seq->tkip.iv32 = TKIP_PN_TO_IV32(pn64); - seq->tkip.iv16 = TKIP_PN_TO_IV16(pn64); - break; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_CCMP_256: - case WLAN_CIPHER_SUITE_AES_CMAC: - case WLAN_CIPHER_SUITE_BIP_CMAC_256: - BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) != - offsetof(typeof(*seq), aes_cmac)); - case WLAN_CIPHER_SUITE_BIP_GMAC_128: - case WLAN_CIPHER_SUITE_BIP_GMAC_256: - BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) != - offsetof(typeof(*seq), aes_gmac)); - case WLAN_CIPHER_SUITE_GCMP: - case WLAN_CIPHER_SUITE_GCMP_256: - BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) != - offsetof(typeof(*seq), gcmp)); - pn64 = atomic64_read(&key->conf.tx_pn); - seq->ccmp.pn[5] = pn64; - seq->ccmp.pn[4] = pn64 >> 8; - seq->ccmp.pn[3] = pn64 >> 16; - seq->ccmp.pn[2] = pn64 >> 24; - seq->ccmp.pn[1] = pn64 >> 32; - seq->ccmp.pn[0] = pn64 >> 40; - break; - default: - WARN_ON(1); - } -} -EXPORT_SYMBOL(ieee80211_get_key_tx_seq); - void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { @@ -1030,48 +985,6 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf, } EXPORT_SYMBOL(ieee80211_get_key_rx_seq); -void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf, - struct ieee80211_key_seq *seq) -{ - struct ieee80211_key *key; - u64 pn64; - - key = container_of(keyconf, struct ieee80211_key, conf); - - switch (key->conf.cipher) { - case WLAN_CIPHER_SUITE_TKIP: - pn64 = (u64)seq->tkip.iv16 | ((u64)seq->tkip.iv32 << 16); - atomic64_set(&key->conf.tx_pn, pn64); - break; - case WLAN_CIPHER_SUITE_CCMP: - case WLAN_CIPHER_SUITE_CCMP_256: - case WLAN_CIPHER_SUITE_AES_CMAC: - case WLAN_CIPHER_SUITE_BIP_CMAC_256: - BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) != - offsetof(typeof(*seq), aes_cmac)); - case WLAN_CIPHER_SUITE_BIP_GMAC_128: - case WLAN_CIPHER_SUITE_BIP_GMAC_256: - BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) != - offsetof(typeof(*seq), aes_gmac)); - case WLAN_CIPHER_SUITE_GCMP: - case WLAN_CIPHER_SUITE_GCMP_256: - BUILD_BUG_ON(offsetof(typeof(*seq), ccmp) != - offsetof(typeof(*seq), gcmp)); - pn64 = (u64)seq->ccmp.pn[5] | - ((u64)seq->ccmp.pn[4] << 8) | - ((u64)seq->ccmp.pn[3] << 16) | - ((u64)seq->ccmp.pn[2] << 24) | - ((u64)seq->ccmp.pn[1] << 32) | - ((u64)seq->ccmp.pn[0] << 40); - atomic64_set(&key->conf.tx_pn, pn64); - break; - default: - WARN_ON(1); - break; - } -} -EXPORT_SYMBOL_GPL(ieee80211_set_key_tx_seq); - void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf, int tid, struct ieee80211_key_seq *seq) { -- cgit v1.2.3 From 395174bb07c1dce58fbf2baa3a01bb69f5103c59 Mon Sep 17 00:00:00 2001 From: Loic Poulain Date: Mon, 22 Feb 2016 10:48:03 +0100 Subject: Bluetooth: hci_uart: Add 
Intel/AG6xx support This driver implements support for iBT2.1 Bluetooth controller embedded in the AG620 communication combo. The controller needs to be configured with bddata and can be patched with a binary patch file (pbn). These operations are performed in manufacturing mode. Signed-off-by: Loic Poulain Signed-off-by: Marcel Holtmann --- drivers/bluetooth/Kconfig | 11 ++ drivers/bluetooth/Makefile | 1 + drivers/bluetooth/hci_ag6xx.c | 326 ++++++++++++++++++++++++++++++++++++++++++ drivers/bluetooth/hci_ldisc.c | 6 + drivers/bluetooth/hci_uart.h | 8 +- 5 files changed, 351 insertions(+), 1 deletion(-) create mode 100644 drivers/bluetooth/hci_ag6xx.c (limited to 'drivers') diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index ec6af1595062..cf50fd2e96df 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -169,6 +169,17 @@ config BT_HCIUART_QCA Say Y here to compile support for QCA protocol. +config BT_HCIUART_AG6XX + bool "Intel AG6XX protocol support" + depends on BT_HCIUART + select BT_HCIUART_H4 + select BT_INTEL + help + The Intel/AG6XX protocol support enables Bluetooth HCI over serial + port interface for Intel ibt 2.1 Bluetooth controllers. + + Say Y here to compile support for Intel AG6XX protocol. + config BT_HCIBCM203X tristate "HCI BCM203x USB driver" depends on USB diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index 07c9cf381e5a..9c18939fc5c9 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -36,6 +36,7 @@ hci_uart-$(CONFIG_BT_HCIUART_3WIRE) += hci_h5.o hci_uart-$(CONFIG_BT_HCIUART_INTEL) += hci_intel.o hci_uart-$(CONFIG_BT_HCIUART_BCM) += hci_bcm.o hci_uart-$(CONFIG_BT_HCIUART_QCA) += hci_qca.o +hci_uart-$(CONFIG_BT_HCIUART_AG6XX) += hci_ag6xx.o hci_uart-objs := $(hci_uart-y) ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c new file mode 100644 index 000000000000..ea65c2d089ed --- /dev/null +++ b/drivers/bluetooth/hci_ag6xx.c @@ -0,0 +1,326 @@ +/* + * + * Bluetooth HCI UART driver for Intel/AG6xx devices + * + * Copyright (C) 2016 Intel Corporation + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "hci_uart.h" +#include "btintel.h" + +struct ag6xx_data { + struct sk_buff *rx_skb; + struct sk_buff_head txq; +}; + +struct pbn_entry { + __le32 addr; + __le32 plen; + __u8 data[0]; +} __packed; + +static int ag6xx_open(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx; + + BT_DBG("hu %p", hu); + + ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL); + if (!ag6xx) + return -ENOMEM; + + skb_queue_head_init(&ag6xx->txq); + + hu->priv = ag6xx; + return 0; +} + +static int ag6xx_close(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ag6xx->txq); + kfree_skb(ag6xx->rx_skb); + kfree(ag6xx); + + hu->priv = NULL; + return 0; +} + +static int ag6xx_flush(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx = hu->priv; + + BT_DBG("hu %p", hu); + + skb_queue_purge(&ag6xx->txq); + return 0; +} + +static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu) +{ + struct ag6xx_data *ag6xx = hu->priv; + struct sk_buff *skb; + + skb = skb_dequeue(&ag6xx->txq); + if (!skb) + return skb; + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); + return skb; +} + +static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb) +{ + struct ag6xx_data *ag6xx = hu->priv; + + skb_queue_tail(&ag6xx->txq, skb); + return 0; +} + +static const struct h4_recv_pkt ag6xx_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, +}; + +static int ag6xx_recv(struct hci_uart *hu, const void *data, int count) +{ + struct ag6xx_data *ag6xx = hu->priv; + + if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) + return -EUNATCH; + + ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count, + ag6xx_recv_pkts, + ARRAY_SIZE(ag6xx_recv_pkts)); + if (IS_ERR(ag6xx->rx_skb)) { + int err = PTR_ERR(ag6xx->rx_skb); + bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); + ag6xx->rx_skb = NULL; + return err; + } + + return count; +} + +static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen, + const void *data) +{ + /* Can write a maximum of 247 bytes per HCI command. + * HCI cmd Header (3), Intel mem write header (6), data (247). + */ + while (plen > 0) { + struct sk_buff *skb; + u8 cmd_param[253], fragment_len = (plen > 247) ? 
247 : plen; + __le32 leaddr = cpu_to_le32(addr); + + memcpy(cmd_param, &leaddr, 4); + cmd_param[4] = 0; + cmd_param[5] = fragment_len; + memcpy(cmd_param + 6, data, fragment_len); + + skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + kfree_skb(skb); + + plen -= fragment_len; + data += fragment_len; + addr += fragment_len; + } + + return 0; +} + +static int ag6xx_setup(struct hci_uart *hu) +{ + struct hci_dev *hdev = hu->hdev; + struct sk_buff *skb; + struct intel_version ver; + const struct firmware *fw; + const u8 *fw_ptr; + char fwname[64]; + bool patched = false; + int err; + + err = btintel_enter_mfg(hdev); + if (err) + return err; + + err = btintel_read_version(hdev, &ver); + if (err) + return err; + + btintel_version_info(hdev, &ver); + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (ver.hw_platform != 0x37) { + bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X", + ver.hw_platform); + return -EINVAL; + } + + /* Only the hardware variant iBT 2.1 (AG6XX) is supported by this + * firmware setup method. + */ + if (ver.hw_variant != 0x0a) { + bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x", + ver.hw_variant); + return -EINVAL; + } + + snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata", + ver.hw_platform, ver.hw_variant); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)", + fwname, err); + goto patch; + } + fw_ptr = fw->data; + + bt_dev_info(hdev, "Applying bddata (%s)", fwname); + + skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data, + HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb)); + release_firmware(fw); + return PTR_ERR(skb); + } + kfree_skb(skb); + + release_firmware(fw); + +patch: + /* If there is no applied patch, fw_patch_num is always 0x00. In other + * cases, current firmware is already patched. No need to patch it. + */ + if (ver.fw_patch_num) { + bt_dev_info(hdev, "Device is already patched. patch num: %02x", + ver.fw_patch_num); + patched = true; + goto complete; + } + + snprintf(fwname, sizeof(fwname), + "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn", + ver.hw_platform, ver.hw_variant, ver.hw_revision, + ver.fw_variant, ver.fw_revision, ver.fw_build_num, + ver.fw_build_ww, ver.fw_build_yy); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Failed to open Intel patch file: %s(%d)", + fwname, err); + goto complete; + } + fw_ptr = fw->data; + + bt_dev_info(hdev, "Patching firmware file (%s)", fwname); + + /* PBN patch file contains a list of binary patches to be applied on top + * of the embedded firmware. Each patch entry header contains the target + * address and patch size. + * + * Patch entry: + * | addr(le) | patch_len(le) | patch_data | + * | 4 Bytes | 4 Bytes | n Bytes | + * + * PBN file is terminated by a patch entry whose address is 0xffffffff. 
+ */ + while (fw->size > fw_ptr - fw->data) { + struct pbn_entry *pbn = (void *)fw_ptr; + u32 addr, plen; + + if (pbn->addr == 0xffffffff) { + bt_dev_info(hdev, "Patching complete"); + patched = true; + break; + } + + addr = le32_to_cpu(pbn->addr); + plen = le32_to_cpu(pbn->plen); + + if (fw->data + fw->size <= pbn->data + plen) { + bt_dev_info(hdev, "Invalid patch len (%d)", plen); + break; + } + + bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data), + fw->size); + + err = intel_mem_write(hdev, addr, plen, pbn->data); + if (err) { + bt_dev_err(hdev, "Patching failed"); + break; + } + + fw_ptr = pbn->data + plen; + } + + release_firmware(fw); + +complete: + /* Exit manufacturing mode and reset */ + err = btintel_exit_mfg(hdev, true, patched); + + return err; +} + +static const struct hci_uart_proto ag6xx_proto = { + .id = HCI_UART_AG6XX, + .name = "AG6XX", + .manufacturer = 2, + .open = ag6xx_open, + .close = ag6xx_close, + .flush = ag6xx_flush, + .setup = ag6xx_setup, + .recv = ag6xx_recv, + .enqueue = ag6xx_enqueue, + .dequeue = ag6xx_dequeue, +}; + +int __init ag6xx_init(void) +{ + return hci_uart_register_proto(&ag6xx_proto); +} + +int __exit ag6xx_deinit(void) +{ + return hci_uart_unregister_proto(&ag6xx_proto); +} diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 73202624133b..c00168a5bb80 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -804,6 +804,9 @@ static int __init hci_uart_init(void) #ifdef CONFIG_BT_HCIUART_QCA qca_init(); #endif +#ifdef CONFIG_BT_HCIUART_AG6XX + ag6xx_init(); +#endif return 0; } @@ -836,6 +839,9 @@ static void __exit hci_uart_exit(void) #ifdef CONFIG_BT_HCIUART_QCA qca_deinit(); #endif +#ifdef CONFIG_BT_HCIUART_AG6XX + ag6xx_deinit(); +#endif /* Release tty registration of line discipline */ err = tty_unregister_ldisc(N_HCI); diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h index 82c92f1b65b4..4814ff08f427 100644 --- a/drivers/bluetooth/hci_uart.h +++ b/drivers/bluetooth/hci_uart.h @@ -35,7 +35,7 @@ #define HCIUARTGETFLAGS _IOR('U', 204, int) /* UART protocols */ -#define HCI_UART_MAX_PROTO 9 +#define HCI_UART_MAX_PROTO 10 #define HCI_UART_H4 0 #define HCI_UART_BCSP 1 @@ -46,6 +46,7 @@ #define HCI_UART_INTEL 6 #define HCI_UART_BCM 7 #define HCI_UART_QCA 8 +#define HCI_UART_AG6XX 9 #define HCI_UART_RAW_DEVICE 0 #define HCI_UART_RESET_ON_INIT 1 @@ -182,3 +183,8 @@ int bcm_deinit(void); int qca_init(void); int qca_deinit(void); #endif + +#ifdef CONFIG_BT_HCIUART_AG6XX +int ag6xx_init(void); +int ag6xx_deinit(void); +#endif -- cgit v1.2.3 From ada68c31ba9c02d7aabdd87db979fe670b499d54 Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Mon, 22 Feb 2016 18:17:23 +0200 Subject: net/mlx5: Introduce a new header file for physical port functions All the device physical port access functions are implemented in the port.c file. We just extract the exposure of these functions from driver.h into a dedicated header file called port.h. Signed-off-by: Achiad Shochat Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx5/main.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 + drivers/net/ethernet/mellanox/mlx5/core/port.c | 1 + include/linux/mlx5/driver.h | 31 ------------ include/linux/mlx5/port.h | 69 ++++++++++++++++++++++++++ 5 files changed, 72 insertions(+), 31 deletions(-) create mode 100644 include/linux/mlx5/port.h (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 03c418ccbc98..e1cea4415704 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index aac071a7e830..15f6cdb842d5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include "wq.h" diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index a87e773e93f3..1e863216ac4a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -32,6 +32,7 @@ #include #include +#include #include #include "mlx5_core.h" diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1e3006dcf35d..02adc67720ce 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -794,37 +794,6 @@ int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); -int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, - int ptys_size, int proto_mask, u8 local_port); -int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, - u32 *proto_cap, int proto_mask); -int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, - u32 *proto_admin, int proto_mask); -int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, - u8 *link_width_oper, u8 local_port); -int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, int proto_mask, - u8 local_port); -int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, - int proto_mask); -int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status status); -int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, - enum mlx5_port_status *status); - -int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); -void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); -void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, - u8 port); - -int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, - u8 *vl_hw_cap, u8 local_port); - -int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); -int mlx5_query_port_pause(struct mlx5_core_dev *dev, - u32 *rx_pause, u32 *tx_pause); - int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h new file mode 100644 index 000000000000..7accd4a65da5 --- /dev/null +++ b/include/linux/mlx5/port.h @@ -0,0 +1,69 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_PORT_H__ +#define __MLX5_PORT_H__ + +#include + +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); +int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, + int ptys_size, int proto_mask, u8 local_port); +int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, + u32 *proto_cap, int proto_mask); +int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, + u32 *proto_admin, int proto_mask); +int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, + u8 *link_width_oper, u8 local_port); +int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev, + u8 *proto_oper, int proto_mask, + u8 local_port); +int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin, + int proto_mask); +int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status status); +int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, + enum mlx5_port_status *status); + +int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port); +void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port); +void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu, + u8 port); + +int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev, + u8 *vl_hw_cap, u8 local_port); + +int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); +int mlx5_query_port_pause(struct mlx5_core_dev *dev, + u32 *rx_pause, u32 *tx_pause); + +#endif /* __MLX5_PORT_H__ */ -- cgit v1.2.3 From ad909eb064219a64fd10e9c7d9f39a3042760025 Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Mon, 22 Feb 2016 18:17:24 +0200 Subject: net/mlx5: Introduce physical port PFC access functions Add access functions to set and query a physical port PFC (Priority Flow Control) parameters. Signed-off-by: Achiad Shochat Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 41 ++++++++++++++++++++++++++ include/linux/mlx5/port.h | 4 +++ 2 files changed, 45 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 1e863216ac4a..dae70500b6a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -364,3 +364,44 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev, return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_pause); + +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + MLX5_SET(pfcc_reg, in, pfctx, pfc_en_tx); + MLX5_SET(pfcc_reg, in, pfcrx, pfc_en_rx); + MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_tx); + MLX5_SET_TO_ONES(pfcc_reg, in, prio_mask_rx); + + return mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 1); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_pfc); + +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) +{ + u32 in[MLX5_ST_SZ_DW(pfcc_reg)]; + u32 out[MLX5_ST_SZ_DW(pfcc_reg)]; + int err; + + memset(in, 0, sizeof(in)); + MLX5_SET(pfcc_reg, in, local_port, 1); + + err = mlx5_core_access_reg(dev, in, sizeof(in), out, + sizeof(out), MLX5_REG_PFCC, 0, 0); + if (err) + return err; + + if (pfc_en_tx) + *pfc_en_tx = MLX5_GET(pfcc_reg, out, pfctx); + + if (pfc_en_rx) + *pfc_en_rx = MLX5_GET(pfcc_reg, out, pfcrx); + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 7accd4a65da5..4b3644caa936 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -66,4 +66,8 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause); int mlx5_query_port_pause(struct mlx5_core_dev *dev, u32 *rx_pause, u32 *tx_pause); +int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); +int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, + u8 *pfc_en_rx); + #endif /* __MLX5_PORT_H__ */ -- cgit v1.2.3 From 4f3961eeafe0aca8f6b0933899ef0d91f561352d Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 22 Feb 2016 18:17:25 +0200 Subject: net/mlx5: Introduce physical port TC/prio access functions Add access functions to set and query a physical port TC groups and prio parameters. Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 76 ++++++++++++++++++++++++++ include/linux/mlx5/driver.h | 2 + include/linux/mlx5/mlx5_ifc.h | 49 ++++++++++++++++- include/linux/mlx5/port.h | 6 ++ 4 files changed, 132 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index dae70500b6a9..569100d3f57b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -405,3 +405,79 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx) return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_pfc); + +int mlx5_max_tc(struct mlx5_core_dev *mdev) +{ + u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? 
: 8; + + return num_tc - 1; +} + +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc) +{ + u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + int err; + int i; + + memset(in, 0, sizeof(in)); + for (i = 0; i < 8; i++) { + if (prio_tc[i] > mlx5_max_tc(mdev)) + return -EINVAL; + + MLX5_SET(qtct_reg, in, prio, i); + MLX5_SET(qtct_reg, in, tclass, prio_tc[i]); + + err = mlx5_core_access_reg(mdev, in, sizeof(in), out, + sizeof(out), MLX5_REG_QTCT, 0, 1); + if (err) + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc); + +static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, + int inlen) +{ + u32 out[MLX5_ST_SZ_DW(qtct_reg)]; + + if (!MLX5_CAP_GEN(mdev, ets)) + return -ENOTSUPP; + + return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out), + MLX5_REG_QETCR, 0, 1); +} + +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) +{ + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + int i; + + memset(in, 0, sizeof(in)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + MLX5_SET(qetc_reg, in, tc_configuration[i].g, 1); + MLX5_SET(qetc_reg, in, tc_configuration[i].group, tc_group[i]); + } + + return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); + +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) +{ + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + int i; + + memset(in, 0, sizeof(in)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + MLX5_SET(qetc_reg, in, tc_configuration[i].b, 1); + MLX5_SET(qetc_reg, in, tc_configuration[i].bw_allocation, tc_bw[i]); + } + + return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 02adc67720ce..a815da92d4eb 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -99,6 +99,8 @@ enum { }; enum { + MLX5_REG_QETCR = 0x4005, + MLX5_REG_QTCT = 0x400a, MLX5_REG_PCAP = 0x5001, MLX5_REG_PMTU = 0x5003, MLX5_REG_PTYS = 0x5004, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 51f1e540fc2b..ec957e059de8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -729,7 +729,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_1bf[0x3]; u8 log_max_msg[0x5]; - u8 reserved_at_1c7[0x18]; + u8 reserved_at_1c7[0x4]; + u8 max_tc[0x4]; + u8 reserved_at_1cf[0x10]; u8 stat_rate_support[0x10]; u8 reserved_at_1ef[0xc]; @@ -7061,4 +7063,49 @@ struct mlx5_ifc_modify_flow_table_in_bits { u8 reserved_at_100[0x100]; }; +struct mlx5_ifc_ets_tcn_config_reg_bits { + u8 g[0x1]; + u8 b[0x1]; + u8 r[0x1]; + u8 reserved_at_3[0x9]; + u8 group[0x4]; + u8 reserved_at_10[0x9]; + u8 bw_allocation[0x7]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_ets_global_config_reg_bits { + u8 reserved_at_0[0x2]; + u8 r[0x1]; + u8 reserved_at_3[0x1d]; + + u8 reserved_at_20[0xc]; + u8 max_bw_units[0x4]; + u8 reserved_at_30[0x8]; + u8 max_bw_value[0x8]; +}; + +struct mlx5_ifc_qetc_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0x30]; + + struct mlx5_ifc_ets_tcn_config_reg_bits tc_configuration[0x8]; + struct mlx5_ifc_ets_global_config_reg_bits global_configuration; +}; + +struct mlx5_ifc_qtct_reg_bits { + u8 reserved_at_0[0x8]; + u8 port_number[0x8]; + u8 reserved_at_10[0xd]; + u8 prio[0x3]; + + u8 reserved_at_20[0x1d]; + u8 tclass[0x3]; +}; + #endif /* MLX5_IFC_H */ diff --git 
a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 4b3644caa936..0c67e699d017 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -70,4 +70,10 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx); int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx); +int mlx5_max_tc(struct mlx5_core_dev *mdev); + +int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); +int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); +int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); + #endif /* __MLX5_PORT_H__ */ -- cgit v1.2.3 From 08fb1dacdd763431436d648fd9dc4246e3d5517e Mon Sep 17 00:00:00 2001 From: Saeed Mahameed Date: Mon, 22 Feb 2016 18:17:26 +0200 Subject: net/mlx5e: Support DCBNL IEEE ETS Support the ndo_setup_tc callback and the needed methods for multi TC/UP support, and removed the default_vlan_prio from mlx5e_priv which is always 0, it was replaced with hardcoded "0" in the new select queue method. For that we now create MAX_NUM_TC num of TISs (one per prio) on netdevice creation instead of priv->params.num_tc which was always 1. So far each channel had a single TXQ, Now each channel has a TXQ per TC (Traffic Class). Added en_dcbnl.c which implements the set/get DCBNL IEEE ETS, set/get dcbx and registers the mlx5e dcbnl ops. We still use the kernel's default TXQ selection method to select the channel to transmit through but now we use our own method to select the TXQ inside the channel based on VLAN priority. In mlx5, as opposed to mlx4, tc group N gets lower priority than tc group N+1. CC: John Fastabend Signed-off-by: Saeed Mahameed Signed-off-by: Rana Shahout Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 12 ++ drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3 + drivers/net/ethernet/mellanox/mlx5/core/en.h | 15 +- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 190 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 104 ++++++++++- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 8 +- 6 files changed, 317 insertions(+), 15 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index c503ea05e742..1cf722eba607 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -19,3 +19,15 @@ config MLX5_CORE_EN Ethernet support in Mellanox Technologies ConnectX-4 NIC. Ethernet and Infiniband support in ConnectX-4 are currently mutually exclusive. + +config MLX5_CORE_EN_DCB + bool "Data Center Bridging (DCB) Support" + default y + depends on MLX5_CORE_EN && DCB + ---help--- + Say Y here if you want to use Data Center Bridging (DCB) in the + driver. + If set to N, will not be able to configure QoS and ratelimit attributes. + This flag is depended on the kernel's DCB support. 
+ + If unsure, set to Y diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 01c0256effb8..1a82e23ae120 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -3,6 +3,9 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o + mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ en_txrx.o en_clock.o + +mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 15f6cdb842d5..dfbc4e54efd4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -70,6 +70,11 @@ #define MLX5E_NUM_MAIN_GROUPS 9 +#ifdef CONFIG_MLX5_CORE_EN_DCB +#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ +#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */ +#endif + static const char vport_strings[][ETH_GSTRING_LEN] = { /* vport statistics */ "rx_packets", @@ -273,7 +278,6 @@ struct mlx5e_params { u8 log_sq_size; u8 log_rq_size; u16 num_channels; - u8 default_vlan_prio; u8 num_tc; u16 rx_cq_moderation_usec; u16 rx_cq_moderation_pkts; @@ -286,6 +290,9 @@ struct mlx5e_params { u8 rss_hfunc; u8 toeplitz_hash_key[40]; u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; +#ifdef CONFIG_MLX5_CORE_EN_DCB + struct ieee_ets ets; +#endif }; struct mlx5e_tstamp { @@ -506,7 +513,6 @@ struct mlx5e_flow_tables { struct mlx5e_priv { /* priv data path fields - start */ - int default_vlan_prio; struct mlx5e_sq **txq_to_sq_map; int channeltc_to_txq_map[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC]; /* priv data path fields - end */ @@ -666,4 +672,9 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) } extern const struct ethtool_ops mlx5e_ethtool_ops; +#ifdef CONFIG_MLX5_CORE_EN_DCB +extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops; +int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); +#endif + u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c new file mode 100644 index 000000000000..4f097da7e843 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#include +#include +#include "en.h" + +#define MLX5E_MAX_PRIORITY 8 + +static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!MLX5_CAP_GEN(priv->mdev, ets)) + return -ENOTSUPP; + + memcpy(ets, &priv->params.ets, sizeof(*ets)); + return 0; +} + +enum { + MLX5E_VENDOR_TC_GROUP_NUM = 7, + MLX5E_ETS_TC_GROUP_NUM = 0, +}; + +static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) +{ + bool any_tc_mapped_to_ets = false; + int strict_group; + int i; + + for (i = 0; i <= max_tc; i++) + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) + any_tc_mapped_to_ets = true; + + strict_group = any_tc_mapped_to_ets ? 1 : 0; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM; + break; + case IEEE_8021QAZ_TSA_STRICT: + tc_group[i] = strict_group++; + break; + case IEEE_8021QAZ_TSA_ETS: + tc_group[i] = MLX5E_ETS_TC_GROUP_NUM; + break; + } + } +} + +static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, + u8 *tc_group, int max_tc) +{ + int i; + + for (i = 0; i <= max_tc; i++) { + switch (ets->tc_tsa[i]) { + case IEEE_8021QAZ_TSA_VENDOR: + tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + break; + case IEEE_8021QAZ_TSA_STRICT: + tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + break; + case IEEE_8021QAZ_TSA_ETS: + tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC; + break; + } + } +} + +int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) +{ + struct mlx5_core_dev *mdev = priv->mdev; + u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS]; + u8 tc_group[IEEE_8021QAZ_MAX_TCS]; + int max_tc = mlx5_max_tc(mdev); + int err; + + if (!MLX5_CAP_GEN(mdev, ets)) + return -ENOTSUPP; + + mlx5e_build_tc_group(ets, tc_group, max_tc); + mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc); + + err = mlx5_set_port_prio_tc(mdev, ets->prio_tc); + if (err) + return err; + + err = mlx5_set_port_tc_group(mdev, tc_group); + if (err) + return err; + + return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw); +} + +static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets) +{ + int bw_sum = 0; + int i; + + /* Validate Priority */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) + return -EINVAL; + } + + /* Validate Bandwidth Sum */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { + if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) + bw_sum += ets->tc_tx_bw[i]; + } + + if (bw_sum != 0 && bw_sum != 100) + return -EINVAL; + return 0; +} + +static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, + struct ieee_ets *ets) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int err; + + err = mlx5e_dbcnl_validate_ets(ets); + if (err) + return err; + + err = mlx5e_dcbnl_ieee_setets_core(priv, ets); + if (err) + return err; + + memcpy(&priv->params.ets, ets, sizeof(*ets)); + priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; + + return 0; +} + +static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) +{ + return 
DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; +} + +static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) +{ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || + (mode & DCB_CAP_DCBX_VER_CEE) || + !(mode & DCB_CAP_DCBX_VER_IEEE) || + !(mode & DCB_CAP_DCBX_HOST)) + return 1; + + return 0; +} + +const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { + .ieee_getets = mlx5e_dcbnl_ieee_getets, + .ieee_setets = mlx5e_dcbnl_ieee_setets, + .getdcbx = mlx5e_dcbnl_getdcbx, + .setdcbx = mlx5e_dcbnl_setdcbx, +}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index d4e1c3045200..704d75c3c99d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1400,6 +1400,24 @@ static int mlx5e_set_dev_port_mtu(struct net_device *netdev) return 0; } +static void mlx5e_netdev_set_tcs(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + int nch = priv->params.num_channels; + int ntc = priv->params.num_tc; + int tc; + + netdev_reset_tc(netdev); + + if (ntc == 1) + return; + + netdev_set_num_tc(netdev, ntc); + + for (tc = 0; tc < ntc; tc++) + netdev_set_tc_queue(netdev, tc, nch, tc * nch); +} + int mlx5e_open_locked(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1408,6 +1426,8 @@ int mlx5e_open_locked(struct net_device *netdev) set_bit(MLX5E_STATE_OPENED, &priv->state); + mlx5e_netdev_set_tcs(netdev); + num_txqs = priv->params.num_channels * priv->params.num_tc; netif_set_real_num_tx_queues(netdev, num_txqs); netif_set_real_num_rx_queues(netdev, priv->params.num_channels); @@ -1602,7 +1622,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc) memset(in, 0, sizeof(in)); - MLX5_SET(tisc, tisc, prio, tc); + MLX5_SET(tisc, tisc, prio, tc << 1); MLX5_SET(tisc, tisc, transport_domain, priv->tdn); return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]); @@ -1618,7 +1638,7 @@ static int mlx5e_create_tises(struct mlx5e_priv *priv) int err; int tc; - for (tc = 0; tc < priv->params.num_tc; tc++) { + for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) { err = mlx5e_create_tis(priv, tc); if (err) goto err_close_tises; @@ -1637,7 +1657,7 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv) { int tc; - for (tc = 0; tc < priv->params.num_tc; tc++) + for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) mlx5e_destroy_tis(priv, tc); } @@ -1824,6 +1844,40 @@ static void mlx5e_destroy_tirs(struct mlx5e_priv *priv) mlx5e_destroy_tir(priv, i); } +static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + bool was_opened; + int err = 0; + + if (tc && tc != MLX5E_MAX_NUM_TC) + return -EINVAL; + + mutex_lock(&priv->state_lock); + + was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (was_opened) + mlx5e_close_locked(priv->netdev); + + priv->params.num_tc = tc ? 
tc : 1; + + if (was_opened) + err = mlx5e_open_locked(priv->netdev); + + mutex_unlock(&priv->state_lock); + + return err; +} + +static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, + __be16 proto, struct tc_to_netdev *tc) +{ + if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + return -EINVAL; + + return mlx5e_setup_tc(dev, tc->tc); +} + static struct rtnl_link_stats64 * mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { @@ -2028,6 +2082,8 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, + .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, .ndo_set_mac_address = mlx5e_set_mac, @@ -2042,6 +2098,8 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, + .ndo_setup_tc = mlx5e_ndo_setup_tc, + .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, .ndo_set_mac_address = mlx5e_set_mac, @@ -2089,6 +2147,24 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev) 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; } +#ifdef CONFIG_MLX5_CORE_EN_DCB +static void mlx5e_ets_init(struct mlx5e_priv *priv) +{ + int i; + + priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < priv->params.ets.ets_cap; i++) { + priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; + priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; + priv->params.ets.prio_tc[i] = i; + } + + /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ + priv->params.ets.prio_tc[0] = 1; + priv->params.ets.prio_tc[1] = 0; +} +#endif + static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, struct net_device *netdev, int num_channels) @@ -2112,7 +2188,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->params.min_rx_wqes = MLX5E_PARAMS_DEFAULT_MIN_RX_WQES; priv->params.num_tc = 1; - priv->params.default_vlan_prio = 0; priv->params.rss_hfunc = ETH_RSS_HASH_XOR; netdev_rss_key_fill(priv->params.toeplitz_hash_key, @@ -2127,7 +2202,10 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, priv->mdev = mdev; priv->netdev = netdev; priv->params.num_channels = num_channels; - priv->default_vlan_prio = priv->params.default_vlan_prio; + +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_ets_init(priv); +#endif spin_lock_init(&priv->async_events_spinlock); mutex_init(&priv->state_lock); @@ -2156,10 +2234,14 @@ static void mlx5e_build_netdev(struct net_device *netdev) SET_NETDEV_DEV(netdev, &mdev->pdev->dev); - if (MLX5_CAP_GEN(mdev, vport_group_manager)) + if (MLX5_CAP_GEN(mdev, vport_group_manager)) { netdev->netdev_ops = &mlx5e_netdev_ops_sriov; - else +#ifdef CONFIG_MLX5_CORE_EN_DCB + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; +#endif + } else { netdev->netdev_ops = &mlx5e_netdev_ops_basic; + } netdev->watchdog_timeo = 15 * HZ; @@ -2228,7 +2310,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) if (mlx5e_check_required_hca_cap(mdev)) return NULL; - netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), nch, nch); + netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv), + nch * MLX5E_MAX_NUM_TC, + nch); if (!netdev) { mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n"); return NULL; @@ -2303,6 +2387,10 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) mlx5e_init_eth_addr(priv); +#ifdef 
CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); +#endif + err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 2c3fba0fff54..00d855ae03c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -109,12 +109,10 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, { struct mlx5e_priv *priv = netdev_priv(dev); int channel_ix = fallback(dev, skb); - int up = skb_vlan_tag_present(skb) ? - skb->vlan_tci >> VLAN_PRIO_SHIFT : - priv->default_vlan_prio; - int tc = netdev_get_prio_tc_map(dev, up); + int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ? + skb->vlan_tci >> VLAN_PRIO_SHIFT : 0; - return priv->channeltc_to_txq_map[channel_ix][tc]; + return priv->channeltc_to_txq_map[channel_ix][up]; } static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq, -- cgit v1.2.3 From ef9184335e4da720b374a66fe4f80a32982519ca Mon Sep 17 00:00:00 2001 From: Achiad Shochat Date: Mon, 22 Feb 2016 18:17:27 +0200 Subject: net/mlx5e: Support DCBNL IEEE PFC Implement the set/get DCBNL IEEE PFC callbacks. Signed-off-by: Achiad Shochat Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 39 ++++++++++++++++++++++ 1 file changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 4f097da7e843..39d8069ba9e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -166,6 +166,43 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev, return 0; } +static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + + pfc->pfc_cap = mlx5_max_tc(mdev) + 1; + + return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL); +} + +static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev, + struct ieee_pfc *pfc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev = priv->mdev; + enum mlx5_port_status ps; + u8 curr_pfc_en; + int ret; + + mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL); + + if (pfc->pfc_en == curr_pfc_en) + return 0; + + mlx5_query_port_admin_status(mdev, &ps); + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN); + + ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en); + + if (ps == MLX5_PORT_UP) + mlx5_set_port_admin_status(mdev, MLX5_PORT_UP); + + return ret; +} + static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev) { return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE; @@ -185,6 +222,8 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_getets = mlx5e_dcbnl_ieee_getets, .ieee_setets = mlx5e_dcbnl_ieee_setets, + .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc, + .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc, .getdcbx = mlx5e_dcbnl_getdcbx, .setdcbx = mlx5e_dcbnl_setdcbx, }; -- cgit v1.2.3 From d8880795dabf2381ed1e98348f6d9c7ea6fab950 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 22 Feb 2016 18:17:28 +0200 Subject: net/mlx5e: Implement DCBNL IEEE max rate Add support for DCBNL IEEE get/set max rate. 
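For readers tracing the arithmetic, the set-max-rate hunk below converts each DCBNL tc_maxrate value into the 8-bit {max_bw_value, max_bw_unit} pair written to the QETC register. The following is a minimal standalone C sketch of that conversion, an editorial illustration only and not part of the patch; the constants mirror the MLX5E_100MB/MLX5E_1GB scaling and the MLX5_BW_NO_LIMIT / MLX5_100_MBPS_UNIT / MLX5_GBPS_UNIT encodings added in the hunks below.

/* Editorial sketch: mirrors the unit selection in mlx5e_dcbnl_ieee_setmaxrate(). */
#include <stdint.h>
#include <stdio.h>

#define MLX5E_100MB 100000ULL   /* one 100 Mbps unit, as in the patch */
#define MLX5E_1GB   1000000ULL  /* one 1 Gbps unit, as in the patch */

enum { BW_NO_LIMIT = 0, BW_100_MBPS_UNIT = 3, BW_GBPS_UNIT = 4 };

static void encode_maxrate(uint64_t rate, uint8_t *value, uint8_t *unit)
{
	/* 255 is the widest value that fits the 8-bit max_bw_value field;
	 * round the cut-over point up to a whole number of 1 Gbps units,
	 * as roundup(255 * MLX5E_100MB, MLX5E_1GB) does in the patch. */
	uint64_t limit = ((255 * MLX5E_100MB + MLX5E_1GB - 1) / MLX5E_1GB) * MLX5E_1GB;

	if (!rate) {
		*value = 0;
		*unit = BW_NO_LIMIT;            /* zero means "no limit" */
	} else if (rate < limit) {
		uint64_t v = rate / MLX5E_100MB;

		*value = v ? (uint8_t)v : 1;    /* never report 0 for a non-zero rate */
		*unit = BW_100_MBPS_UNIT;       /* fine-grained, 100 Mbps steps */
	} else {
		*value = (uint8_t)(rate / MLX5E_1GB);
		*unit = BW_GBPS_UNIT;           /* coarse, 1 Gbps steps */
	}
}

int main(void)
{
	uint8_t v, u;

	encode_maxrate(2500000, &v, &u);        /* 25 x 100 Mbps units */
	printf("value=%u unit=%u\n", (unsigned)v, (unsigned)u);
	return 0;
}

The get path in the hunk simply reverses this mapping: it multiplies max_bw_value by MLX5E_100MB or MLX5E_1GB according to the reported unit, and leaves the rate at zero when the unit is MLX5_BW_NO_LIMIT.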
Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 73 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/port.c | 65 +++++++++++++++++++ include/linux/mlx5/device.h | 6 ++ include/linux/mlx5/port.h | 6 ++ 4 files changed, 150 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 39d8069ba9e3..3036f279a8fd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -35,6 +35,9 @@ #define MLX5E_MAX_PRIORITY 8 +#define MLX5E_100MB (100000) +#define MLX5E_1GB (1000000) + static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, struct ieee_ets *ets) { @@ -219,9 +222,79 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode) return 0; } +static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; + int err; + int i; + + err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); + if (err) + return err; + + memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + switch (max_bw_unit[i]) { + case MLX5_100_MBPS_UNIT: + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB; + break; + case MLX5_GBPS_UNIT: + maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB; + break; + case MLX5_BW_NO_LIMIT: + break; + default: + WARN(true, "non-supported BW unit"); + break; + } + } + + return 0; +} + +static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev, + struct ieee_maxrate *maxrate) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 max_bw_value[IEEE_8021QAZ_MAX_TCS]; + u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS]; + __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB); + int i; + + memset(max_bw_value, 0, sizeof(max_bw_value)); + memset(max_bw_unit, 0, sizeof(max_bw_unit)); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + if (!maxrate->tc_maxrate[i]) { + max_bw_unit[i] = MLX5_BW_NO_LIMIT; + continue; + } + if (maxrate->tc_maxrate[i] < upper_limit_mbps) { + max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], + MLX5E_100MB); + max_bw_value[i] = max_bw_value[i] ? 
max_bw_value[i] : 1; + max_bw_unit[i] = MLX5_100_MBPS_UNIT; + } else { + max_bw_value[i] = div_u64(maxrate->tc_maxrate[i], + MLX5E_1GB); + max_bw_unit[i] = MLX5_GBPS_UNIT; + } + } + + return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit); +} + const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = { .ieee_getets = mlx5e_dcbnl_ieee_getets, .ieee_setets = mlx5e_dcbnl_ieee_setets, + .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate, + .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate, .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc, .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc, .getdcbx = mlx5e_dcbnl_getdcbx, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 569100d3f57b..d97605ef3efd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -450,6 +450,19 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in, MLX5_REG_QETCR, 0, 1); } +static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out, + int outlen) +{ + u32 in[MLX5_ST_SZ_DW(qtct_reg)]; + + if (!MLX5_CAP_GEN(mdev, ets)) + return -ENOTSUPP; + + memset(in, 0, sizeof(in)); + return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen, + MLX5_REG_QETCR, 0, 0); +} + int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) { u32 in[MLX5_ST_SZ_DW(qetc_reg)]; @@ -481,3 +494,55 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); } EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc); + +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_units) +{ + u32 in[MLX5_ST_SZ_DW(qetc_reg)]; + void *ets_tcn_conf; + int i; + + memset(in, 0, sizeof(in)); + + MLX5_SET(qetc_reg, in, port_number, 1); + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, in, tc_configuration[i]); + + MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, r, 1); + MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_units, + max_bw_units[i]); + MLX5_SET(ets_tcn_config_reg, ets_tcn_conf, max_bw_value, + max_bw_value[i]); + } + + return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in)); +} +EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit); + +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_units) +{ + u32 out[MLX5_ST_SZ_DW(qetc_reg)]; + void *ets_tcn_conf; + int err; + int i; + + err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); + if (err) + return err; + + for (i = 0; i <= mlx5_max_tc(mdev); i++) { + ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, tc_configuration[i]); + + max_bw_value[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, + max_bw_value); + max_bw_units[i] = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, + max_bw_units); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 987764afa65c..bfc1ab0552d3 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -350,6 +350,12 @@ enum { MLX5_SET_PORT_PKEY_TABLE = 20, }; +enum { + MLX5_BW_NO_LIMIT = 0, + MLX5_100_MBPS_UNIT = 3, + MLX5_GBPS_UNIT = 4, +}; + enum { MLX5_MAX_PAGE_SHIFT = 31 }; diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 0c67e699d017..595c7b2d9bfa 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -75,5 +75,11 @@ int mlx5_max_tc(struct mlx5_core_dev *mdev); int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 
*prio_tc); int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); +int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); +int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, + u8 *max_bw_value, + u8 *max_bw_unit); #endif /* __MLX5_PORT_H__ */ -- cgit v1.2.3 From 928cfe8745a62e60c1e8e06676a74724e7786024 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Mon, 22 Feb 2016 18:17:29 +0200 Subject: net/mlx5e: Wake On LAN support Implement set/get WOL by ethtool and added the needed device commands and structures to mlx5_ifc. Signed-off-by: Tariq Toukan Signed-off-by: Rana Shahout Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 6 + .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 125 +++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/port.c | 38 +++++++ include/linux/mlx5/device.h | 11 ++ include/linux/mlx5/mlx5_ifc.h | 62 +++++++++- include/linux/mlx5/port.h | 2 + 6 files changed, 243 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 037fc4cdf5af..9ce87c624450 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -560,6 +560,12 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_ACCESS_REG: return "MLX5_CMD_OP_ACCESS_REG"; + case MLX5_CMD_OP_SET_WOL_ROL: + return "SET_WOL_ROL"; + + case MLX5_CMD_OP_QUERY_WOL_ROL: + return "QUERY_WOL_ROL"; + default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 65624ac65b4c..e9760f895744 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -884,6 +884,129 @@ static int mlx5e_get_ts_info(struct net_device *dev, return 0; } +static __u32 mlx5e_get_wol_supported(struct mlx5_core_dev *mdev) +{ + __u32 ret = 0; + + if (MLX5_CAP_GEN(mdev, wol_g)) + ret |= WAKE_MAGIC; + + if (MLX5_CAP_GEN(mdev, wol_s)) + ret |= WAKE_MAGICSECURE; + + if (MLX5_CAP_GEN(mdev, wol_a)) + ret |= WAKE_ARP; + + if (MLX5_CAP_GEN(mdev, wol_b)) + ret |= WAKE_BCAST; + + if (MLX5_CAP_GEN(mdev, wol_m)) + ret |= WAKE_MCAST; + + if (MLX5_CAP_GEN(mdev, wol_u)) + ret |= WAKE_UCAST; + + if (MLX5_CAP_GEN(mdev, wol_p)) + ret |= WAKE_PHY; + + return ret; +} + +static __u32 mlx5e_refomrat_wol_mode_mlx5_to_linux(u8 mode) +{ + __u32 ret = 0; + + if (mode & MLX5_WOL_MAGIC) + ret |= WAKE_MAGIC; + + if (mode & MLX5_WOL_SECURED_MAGIC) + ret |= WAKE_MAGICSECURE; + + if (mode & MLX5_WOL_ARP) + ret |= WAKE_ARP; + + if (mode & MLX5_WOL_BROADCAST) + ret |= WAKE_BCAST; + + if (mode & MLX5_WOL_MULTICAST) + ret |= WAKE_MCAST; + + if (mode & MLX5_WOL_UNICAST) + ret |= WAKE_UCAST; + + if (mode & MLX5_WOL_PHY_ACTIVITY) + ret |= WAKE_PHY; + + return ret; +} + +static u8 mlx5e_refomrat_wol_mode_linux_to_mlx5(__u32 mode) +{ + u8 ret = 0; + + if (mode & WAKE_MAGIC) + ret |= MLX5_WOL_MAGIC; + + if (mode & WAKE_MAGICSECURE) + ret |= MLX5_WOL_SECURED_MAGIC; + + if (mode & WAKE_ARP) + ret |= MLX5_WOL_ARP; + + if (mode & WAKE_BCAST) + ret |= MLX5_WOL_BROADCAST; + + if (mode & WAKE_MCAST) + ret |= MLX5_WOL_MULTICAST; + + if (mode & WAKE_UCAST) + ret |= MLX5_WOL_UNICAST; + + if (mode & WAKE_PHY) + ret |= MLX5_WOL_PHY_ACTIVITY; + + return ret; +} + 
+static void mlx5e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + u8 mlx5_wol_mode; + int err; + + memset(wol, 0, sizeof(*wol)); + + wol->supported = mlx5e_get_wol_supported(mdev); + if (!wol->supported) + return; + + err = mlx5_query_port_wol(mdev, &mlx5_wol_mode); + if (err) + return; + + wol->wolopts = mlx5e_refomrat_wol_mode_mlx5_to_linux(mlx5_wol_mode); +} + +static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + __u32 wol_supported = mlx5e_get_wol_supported(mdev); + u32 mlx5_wol_mode; + + if (!wol_supported) + return -ENOTSUPP; + + if (wol->wolopts & ~wol_supported) + return -EINVAL; + + mlx5_wol_mode = mlx5e_refomrat_wol_mode_linux_to_mlx5(wol->wolopts); + + return mlx5_set_port_wol(mdev, mlx5_wol_mode); +} + const struct ethtool_ops mlx5e_ethtool_ops = { .get_drvinfo = mlx5e_get_drvinfo, .get_link = ethtool_op_get_link, @@ -908,4 +1031,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_pauseparam = mlx5e_get_pauseparam, .set_pauseparam = mlx5e_set_pauseparam, .get_ts_info = mlx5e_get_ts_info, + .get_wol = mlx5e_get_wol, + .set_wol = mlx5e_set_wol, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index d97605ef3efd..e1f2e1059cfd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -546,3 +546,41 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, return 0; } EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit); + +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode) +{ + u32 in[MLX5_ST_SZ_DW(set_wol_rol_in)]; + u32 out[MLX5_ST_SZ_DW(set_wol_rol_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(set_wol_rol_in, in, opcode, MLX5_CMD_OP_SET_WOL_ROL); + MLX5_SET(set_wol_rol_in, in, wol_mode_valid, 1); + MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), + out, sizeof(out)); +} +EXPORT_SYMBOL_GPL(mlx5_set_port_wol); + +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode) +{ + u32 in[MLX5_ST_SZ_DW(query_wol_rol_in)]; + u32 out[MLX5_ST_SZ_DW(query_wol_rol_out)]; + int err; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(query_wol_rol_in, in, opcode, MLX5_CMD_OP_QUERY_WOL_ROL); + + err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), + out, sizeof(out)); + + if (!err) + *wol_mode = MLX5_GET(query_wol_rol_out, out, wol_mode); + + return err; +} +EXPORT_SYMBOL_GPL(mlx5_query_port_wol); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index bfc1ab0552d3..68a56bc37df2 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1183,6 +1183,17 @@ enum { MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, }; +enum mlx5_wol_mode { + MLX5_WOL_DISABLE = 0, + MLX5_WOL_SECURED_MAGIC = 1 << 1, + MLX5_WOL_MAGIC = 1 << 2, + MLX5_WOL_ARP = 1 << 3, + MLX5_WOL_BROADCAST = 1 << 4, + MLX5_WOL_MULTICAST = 1 << 5, + MLX5_WOL_UNICAST = 1 << 6, + MLX5_WOL_PHY_ACTIVITY = 1 << 7, +}; + /* MLX5 DEV CAPs */ /* TODO: EAT.ME */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ec957e059de8..03ffe9530365 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -166,6 +166,8 @@ enum { MLX5_CMD_OP_SET_L2_TABLE_ENTRY = 0x829, 
MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY = 0x82a, MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY = 0x82b, + MLX5_CMD_OP_SET_WOL_ROL = 0x830, + MLX5_CMD_OP_QUERY_WOL_ROL = 0x831, MLX5_CMD_OP_CREATE_TIR = 0x900, MLX5_CMD_OP_MODIFY_TIR = 0x901, MLX5_CMD_OP_DESTROY_TIR = 0x902, @@ -731,7 +733,17 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_max_msg[0x5]; u8 reserved_at_1c7[0x4]; u8 max_tc[0x4]; - u8 reserved_at_1cf[0x10]; + u8 reserved_at_1cf[0x6]; + u8 rol_s[0x1]; + u8 rol_g[0x1]; + u8 reserved_at_1d7[0x1]; + u8 wol_s[0x1]; + u8 wol_g[0x1]; + u8 wol_a[0x1]; + u8 wol_b[0x1]; + u8 wol_m[0x1]; + u8 wol_u[0x1]; + u8 wol_p[0x1]; u8 stat_rate_support[0x10]; u8 reserved_at_1ef[0xc]; @@ -6873,6 +6885,54 @@ struct mlx5_ifc_mtt_bits { u8 rd_en[0x1]; }; +struct mlx5_ifc_query_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x10]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + +struct mlx5_ifc_query_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x40]; +}; + +struct mlx5_ifc_set_wol_rol_in_bits { + u8 opcode[0x10]; + u8 reserved_at_10[0x10]; + + u8 reserved_at_20[0x10]; + u8 op_mod[0x10]; + + u8 rol_mode_valid[0x1]; + u8 wol_mode_valid[0x1]; + u8 reserved_at_42[0xe]; + u8 rol_mode[0x8]; + u8 wol_mode[0x8]; + + u8 reserved_at_60[0x20]; +}; + enum { MLX5_INITIAL_SEG_NIC_INTERFACE_FULL_DRIVER = 0x0, MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED = 0x1, diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 595c7b2d9bfa..a1d145abd4eb 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -81,5 +81,7 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev, int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, u8 *max_bw_value, u8 *max_bw_unit); +int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); +int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); #endif /* __MLX5_PORT_H__ */ -- cgit v1.2.3 From 5f6d12d10f0c835fcd821f883f05c1107127986e Mon Sep 17 00:00:00 2001 From: Matthew Finlay Date: Mon, 22 Feb 2016 18:17:30 +0200 Subject: net/mlx5e: Move to checksum complete Use checksum complete for all IP packets, unless they are HW LRO, in which case, use checksum unnecessary. Signed-off-by: Matthew Finlay Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index dd959d929aad..519a07f253f9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -167,14 +167,15 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb) static inline void mlx5e_handle_csum(struct net_device *netdev, struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, - struct sk_buff *skb) + struct sk_buff *skb, + bool lro) { if (unlikely(!(netdev->features & NETIF_F_RXCSUM))) goto csum_none; - if (likely(cqe->hds_ip_ext & CQE_L4_OK)) { + if (lro) { skb->ip_summed = CHECKSUM_UNNECESSARY; - } else if (is_first_ethertype_ip(skb)) { + } else if (likely(is_first_ethertype_ip(skb))) { skb->ip_summed = CHECKSUM_COMPLETE; skb->csum = csum_unfold((__force __sum16)cqe->check_sum); rq->stats.csum_sw++; @@ -211,7 +212,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (unlikely(mlx5e_rx_hw_stamp(tstamp))) mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb)); - mlx5e_handle_csum(netdev, cqe, rq, skb); + mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg); skb->protocol = eth_type_trans(skb, netdev); -- cgit v1.2.3 From 1afff42c062fe1d1f8d99423dddfb8ca5f100574 Mon Sep 17 00:00:00 2001 From: Matthew Finlay Date: Mon, 22 Feb 2016 18:17:31 +0200 Subject: net/mlx5e: Protect en header file from redefinitions add ifndef to en.h. needed for upcoming vxlan patchset. Signed-off-by: Matthew Finlay Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index dfbc4e54efd4..786a2471ec0e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -29,6 +29,8 @@ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ +#ifndef __MLX5_EN_H__ +#define __MLX5_EN_H__ #include #include @@ -678,3 +680,5 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets); #endif u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev); + +#endif /* __MLX5_EN_H__ */ -- cgit v1.2.3 From b3f63c3d5e2cbb9c800516ef47e32d8cb0cf237b Mon Sep 17 00:00:00 2001 From: Matthew Finlay Date: Mon, 22 Feb 2016 18:17:32 +0200 Subject: net/mlx5e: Add netdev support for VXLAN tunneling If a VXLAN udp dport is added to device it will: - Configure the hardware to offload the port (up to the max supported). - Advertise NETIF_F_GSO_UDP_TUNNEL and supported hw_enc_features. Signed-off-by: Matthew Finlay Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 + drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 95 +++++++++++- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 170 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 54 +++++++ 6 files changed, 332 insertions(+), 3 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vxlan.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/vxlan.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 1a82e23ae120..11b592dbf16a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,6 +6,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o + en_txrx.o en_clock.o vxlan.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 9ce87c624450..97f5114fc113 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -566,6 +566,12 @@ const char *mlx5_command_str(int command) case MLX5_CMD_OP_QUERY_WOL_ROL: return "QUERY_WOL_ROL"; + case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT: + return "ADD_VXLAN_UDP_DPORT"; + + case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT: + return "DELETE_VXLAN_UDP_DPORT"; + default: return "unknown command opcode"; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 786a2471ec0e..a700c5713226 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -501,6 +501,11 @@ struct mlx5e_vlan_db { bool filter_disabled; }; +struct mlx5e_vxlan_db { + spinlock_t lock; /* protect vxlan table */ + struct radix_tree_root tree; +}; + struct mlx5e_flow_table { int num_groups; struct mlx5_flow_table *t; @@ -535,6 +540,7 @@ struct mlx5e_priv { struct mlx5e_flow_tables fts; struct mlx5e_eth_addr_db eth_addr; struct mlx5e_vlan_db vlan; + struct mlx5e_vxlan_db vxlan; struct mlx5e_params params; spinlock_t async_events_spinlock; /* sync hw events */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 704d75c3c99d..6f7eb3b21e2b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -31,8 +31,10 @@ */ #include +#include #include "en.h" #include "eswitch.h" +#include "vxlan.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -2078,6 +2080,78 @@ static int mlx5e_get_vf_stats(struct net_device *dev, vf_stats); } +static void mlx5e_add_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!mlx5e_vxlan_allowed(priv->mdev)) + return; + + mlx5e_vxlan_add_port(priv, be16_to_cpu(port)); +} + +static void mlx5e_del_vxlan_port(struct net_device *netdev, + sa_family_t sa_family, __be16 port) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + if (!mlx5e_vxlan_allowed(priv->mdev)) + return; + + mlx5e_vxlan_del_port(priv, be16_to_cpu(port)); +} + +static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv, + struct sk_buff *skb, + netdev_features_t features) +{ + struct udphdr *udph; + u16 proto; + u16 port = 0; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + proto = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + proto = ipv6_hdr(skb)->nexthdr; + break; + default: + goto out; + } + + if (proto == IPPROTO_UDP) { + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + } + + /* Verify if UDP port is being offloaded by HW */ + if (port && mlx5e_vxlan_lookup_port(priv, port)) + return features; + +out: + /* Disable CSUM and GSO if the udp dport is not offloaded by HW */ + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); +} + +static netdev_features_t mlx5e_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + /* Validate if the tunneled packet is being offloaded by HW */ + if (skb->encapsulation && + (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK)) + return mlx5e_vxlan_features_check(priv, skb, features); + + return features; +} + static const struct net_device_ops mlx5e_netdev_ops_basic = { .ndo_open = mlx5e_open, .ndo_stop = mlx5e_close, @@ -2108,6 +2182,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = { .ndo_set_features = mlx5e_set_features, .ndo_change_mtu = mlx5e_change_mtu, .ndo_do_ioctl = mlx5e_ioctl, + .ndo_add_vxlan_port = mlx5e_add_vxlan_port, + .ndo_del_vxlan_port = mlx5e_del_vxlan_port, + .ndo_features_check = mlx5e_features_check, .ndo_set_vf_mac = mlx5e_set_vf_mac, .ndo_set_vf_vlan = mlx5e_set_vf_vlan, .ndo_get_vf_config = mlx5e_get_vf_config, @@ -2264,6 +2341,16 @@ static void mlx5e_build_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + if (mlx5e_vxlan_allowed(mdev)) { + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + netdev->hw_enc_features |= NETIF_F_IP_CSUM; + netdev->hw_enc_features |= NETIF_F_RXCSUM; + netdev->hw_enc_features |= NETIF_F_TSO; + netdev->hw_enc_features |= NETIF_F_TSO6; + netdev->hw_enc_features |= NETIF_F_RXHASH; + netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; + } + netdev->features = netdev->hw_features; if (!priv->params.lro_en) netdev->features &= ~NETIF_F_LRO; @@ -2387,6 +2474,8 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) mlx5e_init_eth_addr(priv); + mlx5e_vxlan_init(priv); + #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); #endif @@ -2397,6 +2486,9 @@ static void 
*mlx5e_create_netdev(struct mlx5_core_dev *mdev) goto err_destroy_flow_tables; } + if (mlx5e_vxlan_allowed(mdev)) + vxlan_get_rx_port(netdev); + mlx5e_enable_async_events(priv); schedule_work(&priv->set_rx_mode_work); @@ -2449,6 +2541,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_disable_async_events(priv); flush_scheduled_work(); unregister_netdev(netdev); + mlx5e_vxlan_cleanup(priv); mlx5e_destroy_flow_tables(priv); mlx5e_destroy_tirs(priv); mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c new file mode 100644 index 000000000000..9f10df25f3cd --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include "mlx5_core.h" +#include "vxlan.h" + +void mlx5e_vxlan_init(struct mlx5e_priv *priv) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + + spin_lock_init(&vxlan_db->lock); + INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC); +} + +static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) +{ + struct mlx5_outbox_hdr *hdr; + int err; + + u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)]; + u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)]; + + memset(in, 0, sizeof(in)); + memset(out, 0, sizeof(out)); + + MLX5_SET(add_vxlan_udp_dport_in, in, opcode, + MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); + MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); + + err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); + if (err) + return err; + + hdr = (struct mlx5_outbox_hdr *)out; + return hdr->status ? 
-ENOMEM : 0; +} + +static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) +{ + u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)]; + u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)]; + + memset(&in, 0, sizeof(in)); + memset(&out, 0, sizeof(out)); + + MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, + MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); + MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); + + return mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, + sizeof(out)); +} + +struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + + spin_lock(&vxlan_db->lock); + vxlan = radix_tree_lookup(&vxlan_db->tree, port); + spin_unlock(&vxlan_db->lock); + + return vxlan; +} + +int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + int err; + + err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port); + if (err) + return err; + + vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); + if (!vxlan) { + err = -ENOMEM; + goto err_delete_port; + } + + vxlan->udp_port = port; + + spin_lock_irq(&vxlan_db->lock); + err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan); + spin_unlock_irq(&vxlan_db->lock); + if (err) + goto err_free; + + return 0; + +err_free: + kfree(vxlan); +err_delete_port: + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port); + return err; +} + +static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + + spin_lock_irq(&vxlan_db->lock); + vxlan = radix_tree_delete(&vxlan_db->tree, port); + spin_unlock_irq(&vxlan_db->lock); + + if (!vxlan) + return; + + mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port); + + kfree(vxlan); +} + +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port) +{ + if (!mlx5e_vxlan_lookup_port(priv, port)) + return; + + __mlx5e_vxlan_core_del_port(priv, port); +} + +void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan; + struct mlx5e_vxlan *vxlan; + unsigned int port = 0; + + spin_lock_irq(&vxlan_db->lock); + while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) { + port = vxlan->udp_port; + spin_unlock_irq(&vxlan_db->lock); + __mlx5e_vxlan_core_del_port(priv, (u16)port); + spin_lock_irq(&vxlan_db->lock); + } + spin_unlock_irq(&vxlan_db->lock); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h new file mode 100644 index 000000000000..a01685056ab1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ +#ifndef __MLX5_VXLAN_H__ +#define __MLX5_VXLAN_H__ + +#include +#include "en.h" + +struct mlx5e_vxlan { + u16 udp_port; +}; + +static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev) +{ + return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) && + mlx5_core_is_pf(mdev)); +} + +void mlx5e_vxlan_init(struct mlx5e_priv *priv); +int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port); +void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port); +struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port); +void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv); + +#endif /* __MLX5_VXLAN_H__ */ -- cgit v1.2.3 From 9879515895ffe46a424d00ad5ce92e32fd61790b Mon Sep 17 00:00:00 2001 From: Matthew Finlay Date: Mon, 22 Feb 2016 18:17:33 +0200 Subject: net/mlx5e: Add TX stateless offloads for tunneling Add support for TSO and TX checksum when using hw assisted, tunneled offloads. Signed-off-by: Matthew Finlay Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 00d855ae03c2..6ce2884388cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Mellanox Technologies. All rights reserved. + * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. 
You may choose to be licensed under the terms of the GNU @@ -185,9 +185,14 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) memset(wqe, 0, sizeof(*wqe)); - if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) - eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; - else + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; + if (skb->encapsulation) + eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | + MLX5_ETH_WQE_L4_INNER_CSUM; + else + eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; + } else sq->stats.csum_offload_none++; if (sq->cc != sq->prev_cc) { @@ -200,8 +205,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); opcode = MLX5_OPCODE_LSO; - ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); - payload_len = skb->len - ihs; + + if (skb->encapsulation) + ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); + else + ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + + payload_len = skb->len - ihs; wi->num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; sq->stats.tso_packets++; -- cgit v1.2.3 From 89db09eb5979b74f1f50ee2fb2ce519ee8414c48 Mon Sep 17 00:00:00 2001 From: Matthew Finlay Date: Mon, 22 Feb 2016 18:17:34 +0200 Subject: net/mlx5e: Add TX inner packet counters Add TSO and TX checksum counters for tunneled, inner packets Signed-off-by: Matthew Finlay Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 16 ++++++++++++++-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 8 +++++++- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 20 +++++++++++--------- 3 files changed, 32 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index a700c5713226..1dca3dcf90f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -103,12 +103,15 @@ static const char vport_strings[][ETH_GSTRING_LEN] = { /* SW counters */ "tso_packets", "tso_bytes", + "tso_inner_packets", + "tso_inner_bytes", "lro_packets", "lro_bytes", "rx_csum_good", "rx_csum_none", "rx_csum_sw", "tx_csum_offload", + "tx_csum_inner", "tx_queue_stopped", "tx_queue_wake", "tx_queue_dropped", @@ -141,18 +144,21 @@ struct mlx5e_vport_stats { /* SW counters */ u64 tso_packets; u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; u64 lro_packets; u64 lro_bytes; u64 rx_csum_good; u64 rx_csum_none; u64 rx_csum_sw; u64 tx_csum_offload; + u64 tx_csum_inner; u64 tx_queue_stopped; u64 tx_queue_wake; u64 tx_queue_dropped; u64 rx_wqe_err; -#define NUM_VPORT_COUNTERS 32 +#define NUM_VPORT_COUNTERS 35 }; static const char pport_strings[][ETH_GSTRING_LEN] = { @@ -252,7 +258,10 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = { "packets", "tso_packets", "tso_bytes", + "tso_inner_packets", + "tso_inner_bytes", "csum_offload_none", + "csum_offload_inner", "stopped", "wake", "dropped", @@ -263,12 +272,15 @@ struct mlx5e_sq_stats { u64 packets; u64 tso_packets; u64 tso_bytes; + u64 tso_inner_packets; + u64 tso_inner_bytes; u64 csum_offload_none; + u64 csum_offload_inner; u64 stopped; u64 wake; u64 dropped; u64 nop; -#define NUM_SQ_STATS 8 +#define NUM_SQ_STATS 11 }; struct mlx5e_stats { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6f7eb3b21e2b..0d45f35aee72 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -145,9 +145,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) /* Collect firts the SW counters and then HW for consistency */ s->tso_packets = 0; s->tso_bytes = 0; + s->tso_inner_packets = 0; + s->tso_inner_bytes = 0; s->tx_queue_stopped = 0; s->tx_queue_wake = 0; s->tx_queue_dropped = 0; + s->tx_csum_inner = 0; tx_offload_none = 0; s->lro_packets = 0; s->lro_bytes = 0; @@ -168,9 +171,12 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->tso_packets += sq_stats->tso_packets; s->tso_bytes += sq_stats->tso_bytes; + s->tso_inner_packets += sq_stats->tso_inner_packets; + s->tso_inner_bytes += sq_stats->tso_inner_bytes; s->tx_queue_stopped += sq_stats->stopped; s->tx_queue_wake += sq_stats->wake; s->tx_queue_dropped += sq_stats->dropped; + s->tx_csum_inner += sq_stats->csum_offload_inner; tx_offload_none += sq_stats->csum_offload_none; } } @@ -245,7 +251,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) s->tx_broadcast_bytes; /* Update calculated offload counters */ - s->tx_csum_offload = s->tx_packets - tx_offload_none; + s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner; s->rx_csum_good = s->rx_packets - s->rx_csum_none - s->rx_csum_sw; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 6ce2884388cc..a05c070cbc2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -187,11 +187,13 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; - if (skb->encapsulation) + if (skb->encapsulation) { eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM | MLX5_ETH_WQE_L4_INNER_CSUM; - else + sq->stats.csum_offload_inner++; + } else { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; + } } else sq->stats.csum_offload_none++; @@ -201,21 +203,21 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) } if (skb_is_gso(skb)) { - u32 payload_len; - eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size); opcode = MLX5_OPCODE_LSO; - if (skb->encapsulation) + if (skb->encapsulation) { ihs = skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb); - else + sq->stats.tso_inner_packets++; + sq->stats.tso_inner_bytes += skb->len - ihs; + } else { ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); + sq->stats.tso_packets++; + sq->stats.tso_bytes += skb->len - ihs; + } - payload_len = skb->len - ihs; wi->num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; - sq->stats.tso_packets++; - sq->stats.tso_bytes += payload_len; } else { bf = sq->bf_budget && !skb->xmit_more && -- cgit v1.2.3 From b0fd2eb28bd47e0c906e31dbaccaf17883e238ff Mon Sep 17 00:00:00 2001 From: "ajit.khaparde@broadcom.com" Date: Tue, 23 Feb 2016 00:33:48 +0530 Subject: be2net: Declare some u16 fields as u32 to improve performance When 16-bit integers are loaded on CPUs with high order native register sizes, the CPU could use some extra ops before using them. And currently some of the frequently used fields in the driver like the producer and consumer indices of the queues are declared as u16. This patch declares such fields as u32. With this change we see the 64-byte packets per second numbers improve by about 4%. Signed-off-by: Ajit Khaparde Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 18 +++++++++--------- drivers/net/ethernet/emulex/benet/be_cmds.c | 2 +- drivers/net/ethernet/emulex/benet/be_main.c | 16 +++++++++------- 3 files changed, 19 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index ab24f84060c6..756f25bd8ee3 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -124,27 +124,27 @@ struct be_dma_mem { }; struct be_queue_info { + u32 len; + u32 entry_size; /* Size of an element in the queue */ + u32 tail, head; + atomic_t used; /* Number of valid elements in the queue */ + u32 id; struct be_dma_mem dma_mem; - u16 len; - u16 entry_size; /* Size of an element in the queue */ - u16 id; - u16 tail, head; bool created; - atomic_t used; /* Number of valid elements in the queue */ }; -static inline u32 MODULO(u16 val, u16 limit) +static inline u32 MODULO(u32 val, u32 limit) { BUG_ON(limit & (limit - 1)); return val & (limit - 1); } -static inline void index_adv(u16 *index, u16 val, u16 limit) +static inline void index_adv(u32 *index, u32 val, u32 limit) { *index = MODULO((*index + val), limit); } -static inline void index_inc(u16 *index, u16 limit) +static inline void index_inc(u32 *index, u32 limit) { *index = MODULO((*index + 1), limit); } @@ -169,7 +169,7 @@ static inline void queue_head_inc(struct be_queue_info *q) index_inc(&q->head, q->len); } -static inline void index_dec(u16 *index, u16 limit) +static inline void index_dec(u32 *index, u32 limit) { *index = MODULO((*index - 1), limit); } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 66fa21426fe2..22402db275f2 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -596,7 +596,7 @@ static int be_mcc_notify_wait(struct be_adapter *adapter) int status; struct be_mcc_wrb *wrb; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - u16 index = mcc_obj->q.head; + u32 index = mcc_obj->q.head; struct be_cmd_resp_hdr *resp; index_dec(&index, mcc_obj->q.len); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 46248467e206..0bd64f1f9778 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -849,9 +849,9 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb, } /* Grab a WRB header for xmit */ -static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo) +static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo) { - u16 head = txo->q.head; + u32 head = txo->q.head; queue_head_inc(&txo->q); return head; @@ -895,7 +895,7 @@ static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr, * WRBs of the current packet are unmapped. Invoked to handle tx setup errors. 
*/ static void be_xmit_restore(struct be_adapter *adapter, - struct be_tx_obj *txo, u16 head, bool map_single, + struct be_tx_obj *txo, u32 head, bool map_single, u32 copied) { struct device *dev; @@ -930,7 +930,7 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo, struct device *dev = &adapter->pdev->dev; struct be_queue_info *txq = &txo->q; bool map_single = false; - u16 head = txq->head; + u32 head = txq->head; dma_addr_t busaddr; int len; @@ -1990,7 +1990,7 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo) struct be_adapter *adapter = rxo->adapter; struct be_rx_page_info *rx_page_info; struct be_queue_info *rxq = &rxo->q; - u16 frag_idx = rxq->tail; + u32 frag_idx = rxq->tail; rx_page_info = &rxo->page_info_tbl[frag_idx]; BUG_ON(!rx_page_info->page); @@ -2401,10 +2401,11 @@ static u16 be_tx_compl_process(struct be_adapter *adapter, { struct sk_buff **sent_skbs = txo->sent_skb_list; struct be_queue_info *txq = &txo->q; - u16 frag_index, num_wrbs = 0; struct sk_buff *skb = NULL; bool unmap_skb_hdr = false; struct be_eth_wrb *wrb; + u16 num_wrbs = 0; + u32 frag_index; do { if (sent_skbs[txq->tail]) { @@ -2516,10 +2517,11 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) static void be_tx_compl_clean(struct be_adapter *adapter) { - u16 end_idx, notified_idx, cmpl = 0, timeo = 0, num_wrbs = 0; struct device *dev = &adapter->pdev->dev; + u16 cmpl = 0, timeo = 0, num_wrbs = 0; struct be_tx_compl_info *txcp; struct be_queue_info *txq; + u32 end_idx, notified_idx; struct be_tx_obj *txo; int i, pending_txqs; -- cgit v1.2.3 From 127bfce54197a6cd43205b7ba950547918128053 Mon Sep 17 00:00:00 2001 From: "ajit.khaparde@broadcom.com" Date: Tue, 23 Feb 2016 00:35:01 +0530 Subject: be2net: Fix a UE caused by passing large frames to the ASIC In QnQ configurations like Flex-10 where the VLANs are inserted by the ASIC, on rare occasions the HW is encountering a scenario where the final frame length ends to be greater than what the ASIC can support. This is because when the TXULP pulls the TX WRB to check the length of the frame to be transmitted it also adds the size of VLANs to be inserted by the HW to the length of the frame indicated in the WRB, which in some cases fails the range check. This causes a UE. Avoid this by trimming the skb length to accommodate the VLAN insertion. Signed-off-by: Ajit Khaparde Signed-off-by: David S. 
Miller --- drivers/net/ethernet/emulex/benet/be.h | 3 +++ drivers/net/ethernet/emulex/benet/be_main.c | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 756f25bd8ee3..ee584c59ff62 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -72,6 +72,9 @@ #define BE_MAX_MTU (BE_MAX_JUMBO_FRAME_SIZE - \ (ETH_HLEN + ETH_FCS_LEN)) +/* Accommodate for QnQ configurations where VLAN insertion is enabled in HW */ +#define BE_MAX_GSO_SIZE (65535 - 2 * VLAN_HLEN) + #define BE_NUM_VLANS_SUPPORTED 64 #define BE_MAX_EQD 128u #define BE_MAX_TX_FRAG_COUNT 30 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0bd64f1f9778..17422b20a8ec 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1123,6 +1123,8 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, struct sk_buff *skb, struct be_wrb_params *wrb_params) { + int err; + /* Lancer, SH and BE3 in SRIOV mode have a bug wherein * packets that are 32b or less may cause a transmit stall * on that port. The workaround is to pad such packets @@ -1139,6 +1141,13 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, return NULL; } + /* The stack can send us skbs with length greater than + * what the HW can handle. Trim the extra bytes. + */ + WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE); + err = pskb_trim(skb, BE_MAX_GSO_SIZE); + WARN_ON(err); + return skb; } @@ -4850,7 +4859,7 @@ static void be_netdev_init(struct net_device *netdev) netdev->flags |= IFF_MULTICAST; - netif_set_gso_max_size(netdev, 65535 - ETH_HLEN); + netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN); netdev->netdev_ops = &be_netdev_ops; -- cgit v1.2.3 From 9e4e6206c67ae11d68fc96882256f37c237087d4 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Mon, 22 Feb 2016 22:51:13 +0100 Subject: bgmac: support Ethernet device on BCM47094 SoC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It needs very similar workarounds to the one on BCM4707. It was tested on D-Link DIR-885L home router. Signed-off-by: Rafał Miłecki Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bgmac.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 230f8e6209e5..99b30a952b38 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -30,6 +30,7 @@ static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac) { switch (bgmac->core->bus->chipinfo.id) { case BCMA_CHIP_ID_BCM4707: + case BCMA_CHIP_ID_BCM47094: case BCMA_CHIP_ID_BCM53018: return true; default: @@ -1052,8 +1053,9 @@ static void bgmac_chip_reset(struct bgmac *bgmac) (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) iost &= ~BGMAC_BCMA_IOST_ATTACHED; - /* 3GMAC: for BCM4707, only do core reset at bgmac_probe() */ - if (ci->id != BCMA_CHIP_ID_BCM4707) { + /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ + if (ci->id != BCMA_CHIP_ID_BCM4707 && + ci->id != BCMA_CHIP_ID_BCM47094) { flags = 0; if (iost & BGMAC_BCMA_IOST_ATTACHED) { flags = BGMAC_BCMA_IOCTL_SW_CLKEN; -- cgit v1.2.3 From e19d0839e75718a0b06db66f6f7ec2df515ae0c2 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 23 Feb 2016 11:48:37 +0200 Subject: gianfar: Map head TxBD first Move the mapping of the head BD before the mapping of fragments. The TOE (h/w offload) decision logic block can be also moved up (as the TOE flag belongs to the head BD), resulting in more localized code (TOE logic vs BD mapping code blocks). Note that, for this h/w, the R (status) bit for the head BD of a S/G frame needs to be written last for a reliable transmission. For the fragmented skb case, a local variable is used to temporarily store the status info of the first BD, replacing a BD status read. A merge of 2 "if(do_tstamp)" blocks was also possible. Signed-off-by: Claudiu Manoil Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/gianfar.c | 96 ++++++++++++++++---------------- 1 file changed, 48 insertions(+), 48 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 2aa7b401cc3b..cb80dba8dd2b 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2389,6 +2389,47 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp = txbdp_start = tx_queue->cur_tx; lstatus = be32_to_cpu(txbdp->lstatus); + /* Add TxPAL between FCB and frame if required */ + if (unlikely(do_tstamp)) { + skb_push(skb, GMAC_TXPAL_LEN); + memset(skb->data, 0, GMAC_TXPAL_LEN); + } + + /* Add TxFCB if required */ + if (fcb_len) { + fcb = gfar_add_fcb(skb); + lstatus |= BD_LFLAG(TXBD_TOE); + } + + /* Set up checksumming */ + if (do_csum) { + gfar_tx_checksum(skb, fcb, fcb_len); + + if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || + unlikely(gfar_csum_errata_76(priv, skb->len))) { + __skb_pull(skb, GMAC_FCB_LEN); + skb_checksum_help(skb); + if (do_vlan || do_tstamp) { + /* put back a new fcb for vlan/tstamp TOE */ + fcb = gfar_add_fcb(skb); + } else { + /* Tx TOE not used */ + lstatus &= ~(BD_LFLAG(TXBD_TOE)); + fcb = NULL; + } + } + } + + if (do_vlan) + gfar_tx_vlan(skb, fcb); + + bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, bufaddr))) + goto dma_map_err; + + txbdp_start->bufPtr = cpu_to_be32(bufaddr); + /* Time stamp insertion requires one additional TxBD */ if (unlikely(do_tstamp)) txbdp_tstamp = txbdp = next_txbd(txbdp, base, @@ -2404,6 +2445,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); } } else { + u32 lstatus_start = lstatus; + /* Place the fragment addresses and lengths into the TxBDs */ for (i = 0; i < nr_frags; i++) { unsigned int frag_len; @@ -2432,56 +2475,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp->lstatus = cpu_to_be32(lstatus); } - lstatus = be32_to_cpu(txbdp_start->lstatus); + lstatus = lstatus_start; } - /* Add TxPAL between FCB and frame if required */ - if (unlikely(do_tstamp)) { - skb_push(skb, GMAC_TXPAL_LEN); - memset(skb->data, 0, GMAC_TXPAL_LEN); - } - - /* Add TxFCB if required */ - if (fcb_len) { - fcb = gfar_add_fcb(skb); - lstatus |= BD_LFLAG(TXBD_TOE); - } - - /* Set up checksumming */ - if (do_csum) { - gfar_tx_checksum(skb, fcb, fcb_len); - - if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) || - unlikely(gfar_csum_errata_76(priv, skb->len))) { - __skb_pull(skb, GMAC_FCB_LEN); - skb_checksum_help(skb); - if (do_vlan || do_tstamp) { - /* put back a new fcb for vlan/tstamp TOE */ - fcb = gfar_add_fcb(skb); - } else { - /* Tx TOE not used */ - lstatus &= ~(BD_LFLAG(TXBD_TOE)); - fcb = NULL; - } - } - } - - if (do_vlan) - gfar_tx_vlan(skb, fcb); - - /* Setup tx hardware time stamping if requested */ - if (unlikely(do_tstamp)) { - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - fcb->ptp = 1; - } - - bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(priv->dev, bufaddr))) - goto dma_map_err; - - txbdp_start->bufPtr = cpu_to_be32(bufaddr); - /* If time stamping is requested one additional TxBD must be set up. The * first TxBD points to the FCB and must have a data length of * GMAC_FCB_LEN. 
The second TxBD points to the actual frame data with @@ -2498,6 +2494,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN; + + /* Setup tx hardware time stamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + fcb->ptp = 1; } else { lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); } -- cgit v1.2.3 From 42f397adfc0a062a17119c08c82ed710b143a006 Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 23 Feb 2016 11:48:38 +0200 Subject: gianfar: Use skb_frag_t pointers inside xmit() Signed-off-by: Claudiu Manoil Signed-off-by: David S. Miller --- drivers/net/ethernet/freescale/gianfar.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index cb80dba8dd2b..7b16ce637a8e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2322,6 +2322,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) struct txfcb *fcb = NULL; struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL; u32 lstatus; + skb_frag_t *frag; int i, rq = 0; int do_tstamp, do_csum, do_vlan; u32 bufaddr; @@ -2448,25 +2449,24 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) u32 lstatus_start = lstatus; /* Place the fragment addresses and lengths into the TxBDs */ - for (i = 0; i < nr_frags; i++) { - unsigned int frag_len; + frag = &skb_shinfo(skb)->frags[0]; + for (i = 0; i < nr_frags; i++, frag++) { + unsigned int size; + /* Point at the next BD, wrapping as needed */ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); - frag_len = skb_shinfo(skb)->frags[i].size; + size = skb_frag_size(frag); - lstatus = be32_to_cpu(txbdp->lstatus) | frag_len | + lstatus = be32_to_cpu(txbdp->lstatus) | size | BD_LFLAG(TXBD_READY); /* Handle the last BD specially */ if (i == nr_frags - 1) lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - bufaddr = skb_frag_dma_map(priv->dev, - &skb_shinfo(skb)->frags[i], - 0, - frag_len, - DMA_TO_DEVICE); + bufaddr = skb_frag_dma_map(priv->dev, frag, 0, + size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(priv->dev, bufaddr))) goto dma_map_err; -- cgit v1.2.3 From 48963b4492e98fd3b3040755a16215d72552f1bb Mon Sep 17 00:00:00 2001 From: Claudiu Manoil Date: Tue, 23 Feb 2016 11:48:39 +0200 Subject: gianfar: Remove redundant ops for do_tstamp from xmit() Timestamp BD status updates that can be merged into the same "do_tstamp" block, no need for extra save/restore to the BD area. The code is more readable too. Signed-off-by: Claudiu Manoil Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/gianfar.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7b16ce637a8e..1e1157fa77f4 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2436,15 +2436,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) txbdp_tstamp = txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size); - if (nr_frags == 0) { - if (unlikely(do_tstamp)) { - u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus); - - lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); - } else { - lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); - } + if (likely(!nr_frags)) { + lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); } else { u32 lstatus_start = lstatus; @@ -2488,8 +2481,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) bufaddr = be32_to_cpu(txbdp_start->bufPtr); bufaddr += fcb_len; + lstatus_ts |= BD_LFLAG(TXBD_READY) | (skb_headlen(skb) - fcb_len); + if (!nr_frags) + lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr); txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts); -- cgit v1.2.3 From 6721e9d568741ced04b1fe6eed42f2ddf585eac4 Mon Sep 17 00:00:00 2001 From: Raanan Avargil Date: Tue, 22 Dec 2015 15:35:01 +0200 Subject: e1000e: Increase ULP timer Due to system level changes introduced in Skylake, ULP exit takes significantly longer to occur. Therefore, driver must wait longer for. Signed-off-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index a049e30639a1..c731465d1999 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1252,9 +1252,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) ew32(H2ME, mac_reg); } - /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */ while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { - if (i++ == 10) { + if (i++ == 30) { ret_val = -E1000_ERR_PHY; goto out; } -- cgit v1.2.3 From 74f31299a41e729226d60426087592b6790f22b7 Mon Sep 17 00:00:00 2001 From: Raanan Avargil Date: Tue, 22 Dec 2015 15:35:02 +0200 Subject: e1000e: Increase PHY PLL clock gate timing Several packet loss issues were reported for which the root cause for them was an incorrect configuration of internal HW PHY clock gating mechanism by SW. This patch provides the correct mechanism. 
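As a rough illustration of what the fix does, the following self-contained sketch shows the read-modify-write pattern applied to the PHY PLL clock-gate register: the timing bits are cleared and refilled with a value that depends on link speed. The mask and the 0x3E8 / 0xFA values mirror the diff below, but read_phy()/write_phy() are hypothetical stand-ins for the driver's locked PHY accessors, so treat this as a sketch rather than the driver code itself.

/* Sketch only: speed-dependent PHY PLL clock-gate update.
 * read_phy()/write_phy() are hypothetical stand-ins for the driver's
 * locked PHY register accessors.
 */
#include <stdint.h>
#include <stdio.h>

#define PLL_CLOCK_GATE_MASK 0x07FF	/* low bits hold the gate timing */

static uint16_t phy_reg_shadow = 0x1234;	/* pretend register content */

static uint16_t read_phy(void)		{ return phy_reg_shadow; }
static void write_phy(uint16_t v)	{ phy_reg_shadow = v; }

static void update_pll_clock_gate(unsigned int speed_mbps)
{
	uint16_t reg = read_phy();

	reg &= ~PLL_CLOCK_GATE_MASK;		/* drop the old timing */
	if (speed_mbps == 100 || speed_mbps == 10)
		reg |= 0x3E8;			/* slower gating for 10/100 */
	else
		reg |= 0xFA;			/* 1000 Mb/s value */
	write_phy(reg);
}

int main(void)
{
	update_pll_clock_gate(100);
	printf("reg after 100 Mb/s update: 0x%04x\n",
	       (unsigned int)phy_reg_shadow);
	return 0;
}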
Signed-off-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 12 ++++++++++++ drivers/net/ethernet/intel/e1000e/ich8lan.h | 3 +++ 2 files changed, 15 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index c731465d1999..786d2148dd67 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1433,6 +1433,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) emi_addr = I217_RX_CONFIG; ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + if (hw->mac.type == e1000_pch_lpt || + hw->mac.type == e1000_pch_spt) { + u16 phy_reg; + + e1e_rphy_locked(hw, I217_PLL_CLOCK_GATE_REG, &phy_reg); + phy_reg &= ~I217_PLL_CLOCK_GATE_MASK; + if (speed == SPEED_100 || speed == SPEED_10) + phy_reg |= 0x3E8; + else + phy_reg |= 0xFA; + e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg); + } hw->phy.ops.release(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 34c551e322eb..7d85f002c3d2 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -226,6 +226,9 @@ #define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 #define HV_PM_CTRL_K1_ENABLE 0x4000 +#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28) +#define I217_PLL_CLOCK_GATE_MASK 0x07FF + #define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ /* Inband Control */ -- cgit v1.2.3 From c26f40daf4e32f970b8337a88b65a8d00332ae6f Mon Sep 17 00:00:00 2001 From: Raanan Avargil Date: Tue, 22 Dec 2015 15:35:03 +0200 Subject: e1000e: Set HW FIFO minimum pointer gap for non-gig speeds Based on feedback from HW team, the configured value of the internal PHY HW FIFO pointer gap was incorrect for non-gig speeds. This patch provides the correct configuration. Signed-off-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 786d2148dd67..e7ccf5ffa835 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1479,6 +1479,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) hw->phy.ops.release(hw); if (ret_val) return ret_val; + } else { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_wphy_locked(hw, + PHY_REG(776, 20), + 0xC023); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } } } -- cgit v1.2.3 From c5c6d07761a9ff64f0ffff2ca410a578fb7c4579 Mon Sep 17 00:00:00 2001 From: Raanan Avargil Date: Tue, 22 Dec 2015 15:35:04 +0200 Subject: e1000e: Clear ULP configuration register on ULP exit There have been bugs caused by HW ULP configuration settings not being properly cleared after cable connect in V-Pro capable systems. This caused HW to get out of sync occasionally. The fix ensures that ULP settings are cleared in HW after LAN cable re-connect. 
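The change itself only widens the set of ULP CONFIG1 bits that are cleared on ULP exit. A minimal sketch of that pattern, collecting the flag bits into one clear mask and applying it with a read-modify-write; the bit values are copied from the ich8lan.h additions in the diff below, while rd()/wr() are hypothetical register accessors used only for illustration.

/* Sketch only: clear a group of ULP CONFIG1 bits on ULP exit.
 * Bit values match the header additions below; rd()/wr() are
 * hypothetical stand-ins for the driver's PHY accessors.
 */
#include <stdint.h>

#define ULP_CONFIG1_EN_ULP_LANPHYPC		0x0400
#define ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST	0x0800
#define ULP_CONFIG1_DISABLE_SMB_PERST		0x1000

#define ULP_CONFIG1_CLEAR_ON_EXIT \
	(ULP_CONFIG1_EN_ULP_LANPHYPC | \
	 ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | \
	 ULP_CONFIG1_DISABLE_SMB_PERST)

static uint16_t ulp_config1 = 0x1C00;	/* pretend current register value */

static uint16_t rd(void)	{ return ulp_config1; }
static void wr(uint16_t v)	{ ulp_config1 = v; }

static void clear_ulp_config_on_exit(void)
{
	wr(rd() & (uint16_t)~ULP_CONFIG1_CLEAR_ON_EXIT);
}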
Signed-off-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/ich8lan.c | 2 ++ drivers/net/ethernet/intel/e1000e/ich8lan.h | 4 ++++ 2 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index e7ccf5ffa835..c0f4887ea44d 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1328,6 +1328,8 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) I218_ULP_CONFIG1_RESET_TO_SMBUS | I218_ULP_CONFIG1_WOL_HOST | I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_EN_ULP_LANPHYPC | + I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST | I218_ULP_CONFIG1_DISABLE_SMB_PERST); e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 7d85f002c3d2..2311f6003f58 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -188,6 +188,10 @@ #define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ #define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ #define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +/* enable ULP even if when phy powered down via lanphypc */ +#define I218_ULP_CONFIG1_EN_ULP_LANPHYPC 0x0400 +/* disable clear of sticky ULP on PERST */ +#define I218_ULP_CONFIG1_DIS_CLR_STICKY_ON_PERST 0x0800 #define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ /* SMBus Address Phy Register */ -- cgit v1.2.3 From 9cd34b3a1cfd47692cbef8cb0761475021883e18 Mon Sep 17 00:00:00 2001 From: Raanan Avargil Date: Tue, 22 Dec 2015 15:35:05 +0200 Subject: e1000e: Initial support for KabeLake i219 (4) and i219 (5) are the next LOM generations that will be available on the next Intel platform (KabeLake). This patch provides the initial support for the devices. 
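Supporting the new parts is essentially a table update: each new PCI device ID is mapped onto the existing SPT board type so the rest of the driver is reused unchanged. Below is a hedged sketch of how such an ID table is consulted at probe time; it shows generic (vendor, device) matching, not e1000e's actual probe path, and models board_pch_spt as a plain enum value. The device IDs are the ones added in the diff below.

/* Sketch only: matching a (vendor, device) pair against an ID table. */
#include <stdint.h>

enum board_type { BOARD_UNKNOWN, BOARD_PCH_SPT };

struct id_entry {
	uint16_t vendor;
	uint16_t device;
	enum board_type board;
};

#define VENDOR_INTEL 0x8086

static const struct id_entry id_table[] = {
	{ VENDOR_INTEL, 0x15D7, BOARD_PCH_SPT },	/* I219_LM4 */
	{ VENDOR_INTEL, 0x15D8, BOARD_PCH_SPT },	/* I219_V4  */
	{ VENDOR_INTEL, 0x15E3, BOARD_PCH_SPT },	/* I219_LM5 */
	{ VENDOR_INTEL, 0x15D6, BOARD_PCH_SPT },	/* I219_V5  */
	{ 0, 0, BOARD_UNKNOWN }				/* terminator */
};

static enum board_type lookup_board(uint16_t vendor, uint16_t device)
{
	const struct id_entry *e;

	for (e = id_table; e->vendor; e++)
		if (e->vendor == vendor && e->device == device)
			return e->board;
	return BOARD_UNKNOWN;
}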
Signed-off-by: Raanan Avargil Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/e1000e/hw.h | 4 ++++ drivers/net/ethernet/intel/e1000e/netdev.c | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index b3949d5bef5c..4e733bf1a38e 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -92,6 +92,10 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ #define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LBG PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7 +#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8 +#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3 +#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6 #define E1000_REVISION_4 4 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index c71ba1bfc1ec..9b4ec13d9161 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7452,6 +7452,10 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LBG_I219_LM3), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM4), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V4), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM5), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V5), board_pch_spt }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; -- cgit v1.2.3 From b72f3f72005dfd649d787535bd04ada3b3f1b3ba Mon Sep 17 00:00:00 2001 From: Takuma Ueba Date: Thu, 31 Dec 2015 14:58:14 +0900 Subject: igb: When GbE link up, wait for Remote receiver status condition I210 device IPv6 autoconf test sometimes fails, because DAD NS for link-local is not transmitted. This packet is silently dropped. This problem is seen only GbE environment. igb_watchdog_task link up detection continues to the following process. The following cases are observed: 1.PHY 1000BASE-T Status Register Remote receiver status bit is NG. (NG status becomes OK after about 200 - 700ms) 2.In this case, the transfer packet is silently dropped. 1000BASE-T Status register [Expected]: 0x3800 or 0x7800 [problem occurred]: 0x2800 or 0x6800 Frequency of occurrence: approx 1/10 - 1/40 observed In order to avoid this problem, wait until 1000BASE-T Status register "Remote receiver status OK" After applying this patch, at least 400 runs succeed with no problems. 
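The fix is a bounded poll: after a gigabit link-up, re-read the 1000BASE-T status register until the remote receiver status bit (bit 12) is set, giving up after roughly two seconds. A self-contained sketch of that retry loop follows; read_phy_status() and sleep_ms() are stand-ins for the driver's PHY read and msleep(), and the stub below fakes the bit coming good on the third poll purely so the sketch runs.

/* Sketch only: poll for "remote receiver status OK" after gigabit link-up. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define REMOTE_RX_STATUS_OK 0x1000	/* bit 12 of the 1000BASE-T status reg */

static unsigned int polls;

static uint16_t read_phy_status(void)
{
	/* stub: pretend the link partner comes good on the third poll */
	return ++polls >= 3 ? REMOTE_RX_STATUS_OK : 0;
}

static void sleep_ms(unsigned int ms) { (void)ms; }

/* Returns true once the link partner reports its receiver as OK,
 * false if it never does within ~2 seconds (20 x 100 ms).
 */
static bool wait_remote_receiver_ok(void)
{
	unsigned int retries = 20;

	while (retries--) {
		if (read_phy_status() & REMOTE_RX_STATUS_OK)
			return true;
		sleep_ms(100);
	}
	return false;
}

int main(void)
{
	printf("link usable: %s\n", wait_remote_receiver_ok() ? "yes" : "no");
	return 0;
}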
Signed-off-by: Takuma Ueba Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index af46fcf8a50e..1aa01809bcaa 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4357,6 +4357,7 @@ static void igb_watchdog_task(struct work_struct *work) u32 link; int i; u32 connsw; + u16 phy_data, retry_count = 20; link = igb_has_link(adapter); @@ -4435,6 +4436,25 @@ static void igb_watchdog_task(struct work_struct *work) break; } + if (adapter->link_speed != SPEED_1000) + goto no_wait; + + /* wait for Remote receiver status OK */ +retry_read_status: + if (!igb_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data)) { + if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && + retry_count) { + msleep(100); + retry_count--; + goto retry_read_status; + } else if (!retry_count) { + dev_err(&adapter->pdev->dev, "exceed max 2 second\n"); + } + } else { + dev_err(&adapter->pdev->dev, "read 1000Base-T Status Reg\n"); + } +no_wait: netif_carrier_on(netdev); igb_ping_all_vfs(adapter); -- cgit v1.2.3 From 5b70e4a12a525b5f3d4a3e3f0567ed877195b187 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 3 Jan 2016 07:44:56 +0100 Subject: igb: constify e1000_phy_operations structure This e1000_phy_operations structure is never modified, so declare it as const. Other structures of this type are already const. Done with the help of Coccinelle. Signed-off-by: Julia Lawall Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.c | 2 +- drivers/net/ethernet/intel/igb/e1000_hw.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 9a1a9c7b0748..a23aa6704394 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -2920,7 +2920,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = { #endif }; -static struct e1000_phy_operations e1000_phy_ops_82575 = { +static const struct e1000_phy_operations e1000_phy_ops_82575 = { .acquire = igb_acquire_phy_82575, .get_cfg_done = igb_get_cfg_done_82575, .release = igb_release_phy_82575, diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h index f0c416e21d2c..2fb2213cd562 100644 --- a/drivers/net/ethernet/intel/igb/e1000_hw.h +++ b/drivers/net/ethernet/intel/igb/e1000_hw.h @@ -372,7 +372,7 @@ struct e1000_thermal_sensor_data { struct e1000_info { s32 (*get_invariants)(struct e1000_hw *); struct e1000_mac_operations *mac_ops; - struct e1000_phy_operations *phy_ops; + const struct e1000_phy_operations *phy_ops; struct e1000_nvm_operations *nvm_ops; }; -- cgit v1.2.3 From 5e350b9260a2e94a9dd1b20fb720d855d5bf1034 Mon Sep 17 00:00:00 2001 From: Todd Fujinaka Date: Tue, 5 Jan 2016 10:08:28 -0800 Subject: igb: enable WoL for OEM devices regardless of EEPROM setting Override EEPROM settings for specific OEM devices. 
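The override is keyed off the PCI subsystem IDs, i.e. the OEM's branding of the adapter rather than the silicon itself. A hedged sketch of that kind of quirk check is below; the structure and field names are simplified stand-ins, while the subsystem values are the ones used in the diff that follows.

/* Sketch only: decide WoL capability from PCI subsystem IDs rather than
 * the EEPROM bit alone. Field names are simplified stand-ins.
 */
#include <stdint.h>
#include <stdbool.h>

#define PCI_VENDOR_ID_DELL 0x1028

struct nic_ids {
	uint16_t subsystem_vendor;
	uint16_t subsystem_device;
	unsigned int pci_func;
	bool is_i350_or_i354;
	bool is_i350;
};

/* Returns true when WoL should be advertised as supported regardless of
 * what the EEPROM capability bit says.
 */
static bool oem_forces_wol_supported(const struct nic_ids *id)
{
	if (id->is_i350_or_i354 &&
	    id->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return true;

	if (id->is_i350) {
		if ((id->subsystem_device == 0x5001 ||
		     id->subsystem_device == 0x5002) && id->pci_func == 0)
			return true;
		if (id->subsystem_device == 0x1F52)
			return true;
	}
	return false;
}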
Signed-off-by: Todd Fujinaka Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 1aa01809bcaa..4238f3edaa3e 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2538,6 +2538,26 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->wol = 0; } + /* Some vendors want the ability to Use the EEPROM setting as + * enable/disable only, and not for capability + */ + if (((hw->mac.type == e1000_i350) || + (hw->mac.type == e1000_i354)) && + (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (hw->mac.type == e1000_i350) { + if (((pdev->subsystem_device == 0x5001) || + (pdev->subsystem_device == 0x5002)) && + (hw->bus.func == 0)) { + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + adapter->wol = 0; + } + if (pdev->subsystem_device == 0x1F52) + adapter->flags |= IGB_FLAG_WOL_SUPPORTED; + } + device_set_wakeup_enable(&adapter->pdev->dev, adapter->flags & IGB_FLAG_WOL_SUPPORTED); -- cgit v1.2.3 From 569f3b3d4e9898ae30788fde128e3277d996710e Mon Sep 17 00:00:00 2001 From: Roland Hii Date: Mon, 11 Jan 2016 15:34:18 +0800 Subject: igb: add conditions for I210 to generate periodic clock output In general case the maximum supported half cycle time of the synchronized output clock is 70msec. Slower half cycle time than 70msec can be programmed also as long as the output clock is synchronized to whole seconds, useful specifically for generating a 1Hz clock. Permitted values for the clock half cycle time are: 125,000,000 decimal, 250,000,000 decimal and 500,000,000 decimal (equals to 125msec, 250msec and 500msec respectively). Before this patch, only the half cycle time of less than or equal to 70msec uses the I210 clock output function. This patch adds additional conditions when half cycle time is equal to 125msec or 250msec or 500msec to use clock output function. Under other conditions, interrupt driven target time output events method is still used to generate the desired clock output. Signed-off-by: Roland Hii Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_ptp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index c44df87c38de..22a8a29895b4 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -525,7 +525,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp, ts.tv_nsec = rq->perout.period.nsec; ns = timespec64_to_ns(&ts); ns = ns >> 1; - if (on && ns <= 70000000LL) { + if (on && ((ns <= 70000000LL) || (ns == 125000000LL) || + (ns == 250000000LL) || (ns == 500000000LL))) { if (ns < 8LL) return -EINVAL; use_freq = 1; -- cgit v1.2.3 From c883de9fd787b6f49bf825f3de3601aeb78a7114 Mon Sep 17 00:00:00 2001 From: Todd Fujinaka Date: Mon, 11 Jan 2016 09:34:50 -0800 Subject: igb: rename igb define to be more generic E1000_MRQC_ENABLE_RSS_4Q enables 4 and 8 queues depending on the part so rename to be generic. Similarly, E1000_MRQC_ENABLE_VMDQ_RSS_2Q has no numeric meaning so rename to be more generic. 
Signed-off-by: Todd Fujinaka Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/e1000_82575.h | 4 ++-- drivers/net/ethernet/intel/igb/igb_main.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 2154aea7aa7e..de8805a2a2fe 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -56,10 +56,10 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr, #define E1000_SRRCTL_TIMESTAMP 0x40000000 -#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 +#define E1000_MRQC_ENABLE_RSS_MQ 0x00000002 #define E1000_MRQC_ENABLE_VMDQ 0x00000003 #define E1000_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 -#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q 0x00000005 +#define E1000_MRQC_ENABLE_VMDQ_RSS_MQ 0x00000005 #define E1000_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 #define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 4238f3edaa3e..99acd84841e4 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3497,12 +3497,12 @@ static void igb_setup_mrqc(struct igb_adapter *adapter) wr32(E1000_VT_CTL, vtctl); } if (adapter->rss_queues > 1) - mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_2Q; + mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ; else mrqc |= E1000_MRQC_ENABLE_VMDQ; } else { if (hw->mac.type != e1000_i211) - mrqc |= E1000_MRQC_ENABLE_RSS_4Q; + mrqc |= E1000_MRQC_ENABLE_RSS_MQ; } igb_vmm_control(adapter); -- cgit v1.2.3 From 6e033700887bf29d4e59f6978a02d989787be620 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 13 Jan 2016 07:31:23 -0800 Subject: igb: Add support for generic Tx checksums This patch adds support for generic Tx checksums to the igb driver. It turns out this is actually pretty easy after going over the datasheet as we were doing a number of steps we didn't need to. In order to perform a Tx checksum for an L4 header we need to fill in the following fields in the Tx descriptor: MACLEN (maximum of 127), retrieved from: skb_network_offset() IPLEN (maximum of 511), retrieved from: skb_checksum_start_offset() - skb_network_offset() TUCMD.L4T indicates offset and if checksum or crc32c, based on: skb->csum_offset The added advantage to doing this is that we can support inner checksum offloads for tunnels and MPLS while still being able to transparently insert VLAN tags. I also took the opportunity to clean-up many of the feature flag configuration bits to make them a bit more consistent between drivers. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 106 ++++++++++++++---------------- 1 file changed, 48 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 99acd84841e4..d90dcd16f57c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2372,27 +2372,35 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * assignment. 
*/ netdev->features |= NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX; + if (hw->mac.type >= e1000_82576) + netdev->features |= NETIF_F_SCTP_CRC; + /* copy netdev features into list of user selectable features */ netdev->hw_features |= netdev->features; netdev->hw_features |= NETIF_F_RXALL; + if (hw->mac.type >= e1000_i350) + netdev->hw_features |= NETIF_F_NTUPLE; + /* set this bit last since it cannot be part of hw_features */ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - netdev->vlan_features |= NETIF_F_TSO | + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_SG; + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; + + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->priv_flags |= IFF_SUPP_NOFCS; @@ -2401,11 +2409,6 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->vlan_features |= NETIF_F_HIGHDMA; } - if (hw->mac.type >= e1000_82576) { - netdev->hw_features |= NETIF_F_SCTP_CRC; - netdev->features |= NETIF_F_SCTP_CRC; - } - netdev->priv_flags |= IFF_UNICAST_FLT; adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); @@ -4883,70 +4886,57 @@ static int igb_tso(struct igb_ring *tx_ring, return 1; } +static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb) +{ + unsigned int offset = 0; + + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); + + return offset == skb_checksum_start_offset(skb); +} + static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first) { struct sk_buff *skb = first->skb; u32 vlan_macip_lens = 0; - u32 mss_l4len_idx = 0; u32 type_tucmd = 0; if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: if (!(first->tx_flags & IGB_TX_FLAGS_VLAN)) return; - } else { - u8 l4_hdr = 0; - - switch (first->protocol) { - case htons(ETH_P_IP): - vlan_macip_lens |= skb_network_header_len(skb); - type_tucmd |= E1000_ADVTXD_TUCMD_IPV4; - l4_hdr = ip_hdr(skb)->protocol; - break; - case htons(ETH_P_IPV6): - vlan_macip_lens |= skb_network_header_len(skb); - l4_hdr = ipv6_hdr(skb)->nexthdr; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but proto=%x!\n", - first->protocol); - } - break; - } + goto no_csum; + } - switch (l4_hdr) { - case IPPROTO_TCP: - type_tucmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - mss_l4len_idx = tcp_hdrlen(skb) << - E1000_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_SCTP: - type_tucmd |= E1000_ADVTXD_TUCMD_L4T_SCTP; - mss_l4len_idx = sizeof(struct sctphdr) << - E1000_ADVTXD_L4LEN_SHIFT; - break; - case IPPROTO_UDP: - mss_l4len_idx = sizeof(struct udphdr) << - E1000_ADVTXD_L4LEN_SHIFT; - break; - default: - if (unlikely(net_ratelimit())) { - dev_warn(tx_ring->dev, - "partial checksum but l4 proto=%x!\n", - l4_hdr); - } + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((first->protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((first->protocol == htons(ETH_P_IPV6)) && + igb_ipv6_csum_is_sctp(skb))) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; break; } - - /* update TX checksum flag */ - first->tx_flags |= IGB_TX_FLAGS_CSUM; + default: + skb_checksum_help(skb); + 
goto csum_failed; } + /* update TX checksum flag */ + first->tx_flags |= IGB_TX_FLAGS_CSUM; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK; - igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx); + igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); } #define IGB_SET_FLAG(_input, _flag, _result) \ -- cgit v1.2.3 From ea6ce6024f9397ff2667fe16447447e622bc4c31 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 13 Jan 2016 07:31:30 -0800 Subject: igbvf: Add support for generic Tx checksums This patch adds support for generic Tx checksums to the igbvf driver. It turns out this is actually pretty easy after going over the datasheet as we were doing a number of steps we didn't need to. In order to perform a Tx checksum for an L4 header we need to fill in the following fields in the Tx descriptor: MACLEN (maximum of 127), retrieved from: skb_network_offset() IPLEN (maximum of 511), retrieved from: skb_checksum_start_offset() - skb_network_offset() TUCMD.L4T indicates offset and if checksum or crc32c, based on: skb->csum_offset The added advantage to doing this is that we can support inner checksum offloads for tunnels and MPLS while still being able to transparently insert VLAN tags. I also took the opportunity to clean-up many of the feature flag configuration bits to make them a bit more consistent between drivers. In the case of the VF drivers this meant adding support for SCTP CRCs, and inner checksum offloads for MPLS and various tunnel types. Signed-off-by: Alexander Duyck Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/netdev.c | 142 +++++++++++++++++------------- drivers/net/ethernet/intel/igbvf/vf.h | 1 + 2 files changed, 82 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 297af801f051..aa3486556630 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -43,6 +43,7 @@ #include #include #include +#include #include "igbvf.h" @@ -1908,6 +1909,31 @@ static void igbvf_watchdog_task(struct work_struct *work) #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 +static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens, + u32 type_tucmd, u32 mss_l4len_idx) +{ + struct e1000_adv_tx_context_desc *context_desc; + struct igbvf_buffer *buffer_info; + u16 i = tx_ring->next_to_use; + + context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + i++; + tx_ring->next_to_use = (i < tx_ring->count) ? 
i : 0; + + /* set bits to identify this as an advanced context descriptor */ + type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT; + + context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); + context_desc->seqnum_seed = 0; + context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); + context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); + + buffer_info->time_stamp = jiffies; + buffer_info->dma = 0; +} + static int igbvf_tso(struct igbvf_adapter *adapter, struct igbvf_ring *tx_ring, struct sk_buff *skb, u32 tx_flags, u8 *hdr_len, @@ -1987,65 +2013,56 @@ static int igbvf_tso(struct igbvf_adapter *adapter, return true; } -static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, - struct igbvf_ring *tx_ring, - struct sk_buff *skb, u32 tx_flags, - __be16 protocol) +static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb) { - struct e1000_adv_tx_context_desc *context_desc; - unsigned int i; - struct igbvf_buffer *buffer_info; - u32 info = 0, tu_cmd = 0; - - if ((skb->ip_summed == CHECKSUM_PARTIAL) || - (tx_flags & IGBVF_TX_FLAGS_VLAN)) { - i = tx_ring->next_to_use; - buffer_info = &tx_ring->buffer_info[i]; - context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); + unsigned int offset = 0; - if (tx_flags & IGBVF_TX_FLAGS_VLAN) - info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); + ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); - info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); - if (skb->ip_summed == CHECKSUM_PARTIAL) - info |= (skb_transport_header(skb) - - skb_network_header(skb)); + return offset == skb_checksum_start_offset(skb); +} - context_desc->vlan_macip_lens = cpu_to_le32(info); +static bool igbvf_tx_csum(struct igbvf_ring *tx_ring, struct sk_buff *skb, + u32 tx_flags, __be16 protocol) +{ + u32 vlan_macip_lens = 0; + u32 type_tucmd = 0; - tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); + if (skb->ip_summed != CHECKSUM_PARTIAL) { +csum_failed: + if (!(tx_flags & IGBVF_TX_FLAGS_VLAN)) + return false; + goto no_csum; + } - if (skb->ip_summed == CHECKSUM_PARTIAL) { - switch (protocol) { - case htons(ETH_P_IP): - tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; - if (ip_hdr(skb)->protocol == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - break; - case htons(ETH_P_IPV6): - if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) - tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; - break; - default: - break; - } + switch (skb->csum_offset) { + case offsetof(struct tcphdr, check): + type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP; + /* fall through */ + case offsetof(struct udphdr, check): + break; + case offsetof(struct sctphdr, checksum): + /* validate that this is actually an SCTP request */ + if (((protocol == htons(ETH_P_IP)) && + (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || + ((protocol == htons(ETH_P_IPV6)) && + igbvf_ipv6_csum_is_sctp(skb))) { + type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP; + break; } - - context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); - context_desc->seqnum_seed = 0; - context_desc->mss_l4len_idx = 0; - - buffer_info->time_stamp = jiffies; - buffer_info->dma = 0; - i++; - if (i == tx_ring->count) - i = 0; - tx_ring->next_to_use = i; - - return true; + default: + skb_checksum_help(skb); + goto csum_failed; } - return false; + vlan_macip_lens = skb_checksum_start_offset(skb) - + skb_network_offset(skb); +no_csum: + vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT; + vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK; + + igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0); + return true; } static int 
igbvf_maybe_stop_tx(struct net_device *netdev, int size) @@ -2264,7 +2281,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, if (tso) tx_flags |= IGBVF_TX_FLAGS_TSO; - else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags, protocol) && + else if (igbvf_tx_csum(tx_ring, skb, tx_flags, protocol) && (skb->ip_summed == CHECKSUM_PARTIAL)) tx_flags |= IGBVF_TX_FLAGS_CSUM; @@ -2717,11 +2734,11 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->bd_number = cards_found++; netdev->hw_features = NETIF_F_SG | - NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | - NETIF_F_TSO | - NETIF_F_TSO6 | - NETIF_F_RXCSUM; + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX | @@ -2731,11 +2748,14 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (pci_using_dac) netdev->features |= NETIF_F_HIGHDMA; - netdev->vlan_features |= NETIF_F_TSO; - netdev->vlan_features |= NETIF_F_TSO6; - netdev->vlan_features |= NETIF_F_IP_CSUM; - netdev->vlan_features |= NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_SG; + netdev->vlan_features |= NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM | + NETIF_F_SCTP_CRC; + + netdev->mpls_features |= NETIF_F_HW_CSUM; + netdev->hw_enc_features |= NETIF_F_HW_CSUM; /*reset the controller to put the device in a known good state */ err = hw->mac.ops.reset_hw(hw); diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h index 0f1eca639f68..f00a41d9a1ca 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.h +++ b/drivers/net/ethernet/intel/igbvf/vf.h @@ -126,6 +126,7 @@ struct e1000_adv_tx_context_desc { #define E1000_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ #define E1000_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ #define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */ #define E1000_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ #define E1000_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ -- cgit v1.2.3 From cc54a59ae6e528c70666033ed085d059f555a57d Mon Sep 17 00:00:00 2001 From: Jon Maxwell Date: Mon, 25 Jan 2016 10:22:52 +1100 Subject: igbvf: remove "link is Up" message when registering mcast address A similar issue was addressed a few years ago in the following thread: http://www.spinics.net/lists/netdev/msg245877.html At that time there were concerns that removing this statement may cause other side effects. However the submitter addressed those concerns. But the dialogue went cold. We have a new case where a customers application is registering and un-registering multicast addresses every few seconds. This is leading to many "Link is Up" messages in the logs as a result of the "netif_carrier_off(netdev)" statement called by igbvf_msix_other(). Also on some kernels it is interfering with the bonding driver causing it to failover and subsequently affecting connectivity. The Sourgeforge driver does not make this call and is therefore not affected. If there were any side effects I would expect that driver to also be affected. I have tested re-loading the igbvf driver and downing the adapter with the PF entity on the host where the VM has this patch. When I bring it back up again connectivity is restored as expected. Therefore I request that this patch gets submitted. 
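For context, the one-line removal below leaves link-state ownership entirely with the watchdog path: the MSI-X "other" vector only records that link status must be re-read and arms the watchdog timer. The fragment below is a hedged paraphrase of the handler as it looks after the change (names are the driver's; the interrupt counter and EIMS re-enable are omitted), not a verbatim copy:

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
	struct net_device *netdev = data;
	struct igbvf_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* No netif_carrier_off() here any more: the watchdog task is the only
	 * place that toggles the carrier, so mailbox interrupts triggered by
	 * multicast updates no longer log spurious "Link is Up" messages.
	 */
	hw->mac.get_link_status = 1;
	if (!test_bit(__IGBVF_DOWN, &adapter->state))
		mod_timer(&adapter->watchdog_timer, jiffies + 1);

	return IRQ_HANDLED;
}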
Signed-off-by: Jon Maxwell Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igbvf/netdev.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index aa3486556630..c12442252adb 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -877,7 +877,6 @@ static irqreturn_t igbvf_msix_other(int irq, void *data) adapter->int_counter1++; - netif_carrier_off(netdev); hw->mac.get_link_status = 1; if (!test_bit(__IGBVF_DOWN, &adapter->state)) mod_timer(&adapter->watchdog_timer, jiffies + 1); -- cgit v1.2.3 From 030f9f52642a20cbd8c1334a237e92e3ef55e2b1 Mon Sep 17 00:00:00 2001 From: Corinna Vinschen Date: Thu, 28 Jan 2016 13:53:23 +0100 Subject: igb: Fix VLAN tag stripping on Intel i350 Problem: When switching off VLAN offloading on an i350, the VLAN interface gets unusable. For testing, set up a VLAN on an i350 and some remote machine, e.g.: $ ip link add link eth0 name eth0.42 type vlan id 42 $ ip addr add 192.168.42.1/24 dev eth0.42 $ ip link set dev eth0.42 up Offloading is switched on by default: $ ethtool -k eth0 | grep vlan-offload rx-vlan-offload: on tx-vlan-offload: on $ ping -c 3 -I eth0.42 192.168.42.2 [...works as usual...] Now switch off VLAN offloading and try again: $ ethtool -K eth0 rxvlan off Actual changes: rx-vlan-offload: off tx-vlan-offload: off [requested on] $ ping -c 3 -I eth0.42 192.168.42.2 PING 192.168.42.2 (192.168.42.2) from 192.168.42.1 eth0.42: 56(84) bytes of da ta. --- 192.168.42.2 ping statistics --- 3 packets transmitted, 0 received, 100% packet loss, time 1999ms I can only reproduce it on an i350, the above works fine on a 82580. While inspecting the igb source, I came across the code in igb_set_vmolr which sets the E1000_VMOLR_STRVLAN/E1000_DVMOLR_STRVLAN flags once and for all, and in all of the igb code there's no other place where the STRVLAN is set or cleared. Thus, VLAN stripping is enabled in igb unconditionally, independently of the offloading setting. I compared that to the latest Intel igb-5.3.3.5 driver from http://sourceforge.net/projects/e1000/ which in fact sets and clears the STRVLAN flag independently from igb_set_vmolr in its own function igb_set_vf_vlan_strip, depending on the vlan settings. So I included the STRVLAN handling from the igb-5.3.3.5 driver into our current igb driver and tested the above scenario again. This time ping still works after switching off VLAN offloading. Tested on i350, with and without addtional VFs, as well as on 82580 successfully. 
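The essential glue is that the ethtool "rxvlan" toggle reaches the driver as the NETIF_F_HW_VLAN_CTAG_RX feature bit, and the new helper is called from igb_vlan_mode() so STRVLAN tracks that setting instead of being forced on in igb_set_vmolr(). A minimal sketch of that call path, abridged from igb_vlan_mode() (the existing E1000_CTRL.VME handling for the PF is elided):

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	/* ... program E1000_CTRL.VME for the PF as before ... */

	/* new: make the VMOLR/DVMOLR strip bit follow the offload setting */
	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}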
Signed-off-by: Corinna Vinschen Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 41 ++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index d90dcd16f57c..3b56f809967c 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3589,6 +3589,28 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, return 0; } +static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter, + int vfn, bool enable) +{ + struct e1000_hw *hw = &adapter->hw; + u32 val, reg; + + if (hw->mac.type < e1000_82576) + return; + + if (hw->mac.type == e1000_i350) + reg = E1000_DVMOLR(vfn); + else + reg = E1000_VMOLR(vfn); + + val = rd32(reg); + if (enable) + val |= E1000_VMOLR_STRVLAN; + else + val &= ~(E1000_VMOLR_STRVLAN); + wr32(reg, val); +} + static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn, bool aupe) { @@ -3602,14 +3624,6 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, return; vmolr = rd32(E1000_VMOLR(vfn)); - vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ - if (hw->mac.type == e1000_i350) { - u32 dvmolr; - - dvmolr = rd32(E1000_DVMOLR(vfn)); - dvmolr |= E1000_DVMOLR_STRVLAN; - wr32(E1000_DVMOLR(vfn), dvmolr); - } if (aupe) vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ else @@ -6099,6 +6113,7 @@ static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf, adapter->vf_data[vf].pf_vlan = vlan; adapter->vf_data[vf].pf_qos = qos; + igb_set_vf_vlan_strip(adapter, vf, true); dev_info(&adapter->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); if (test_bit(__IGB_DOWN, &adapter->state)) { @@ -6126,6 +6141,7 @@ static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf) adapter->vf_data[vf].pf_vlan = 0; adapter->vf_data[vf].pf_qos = 0; + igb_set_vf_vlan_strip(adapter, vf, false); return 0; } @@ -6146,6 +6162,7 @@ static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) { int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); + int ret; if (adapter->vf_data[vf].pf_vlan) return -1; @@ -6154,7 +6171,10 @@ static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) if (!vid && !add) return 0; - return igb_set_vf_vlan(adapter, vid, !!add, vf); + ret = igb_set_vf_vlan(adapter, vid, !!add, vf); + if (!ret) + igb_set_vf_vlan_strip(adapter, vf, !!vid); + return ret; } static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) @@ -6171,6 +6191,7 @@ static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) igb_set_vmvir(adapter, vf_data->pf_vlan | (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf); igb_set_vmolr(adapter, vf, !vf_data->pf_vlan); + igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan)); /* reset multicast table array for vf */ adapter->vf_data[vf].num_vf_mc_hashes = 0; @@ -7323,6 +7344,8 @@ static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features) ctrl &= ~E1000_CTRL_VME; wr32(E1000_CTRL, ctrl); } + + igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable); } static int igb_vlan_rx_add_vid(struct net_device *netdev, -- cgit v1.2.3 From 46eafa59e18d034ba616fdcca688c388d0bbfd91 Mon Sep 17 00:00:00 2001 From: Stefan Assmann Date: Wed, 3 Feb 2016 09:20:50 +0100 Subject: igb: call ndo_stop() instead of 
dev_close() when running offline selftest Calling dev_close() causes IFF_UP to be cleared which will remove the interfaces routes and some addresses. That's probably not what the user intended when running the offline selftest. Besides this does not happen if the interface is brought down before the test, so the current behaviour is inconsistent. Instead call the net_device_ops ndo_stop function directly and avoid touching IFF_UP at all. Signed-off-by: Stefan Assmann Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb.h | 2 ++ drivers/net/ethernet/intel/igb/igb_ethtool.c | 4 ++-- drivers/net/ethernet/intel/igb/igb_main.c | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 707ae5c297ea..9413fa61392f 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -510,6 +510,8 @@ enum igb_boards { extern char igb_driver_name[]; extern char igb_driver_version[]; +int igb_open(struct net_device *netdev); +int igb_close(struct net_device *netdev); int igb_up(struct igb_adapter *); void igb_down(struct igb_adapter *); void igb_reinit_locked(struct igb_adapter *); diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 1d329f1d047b..7982243d1f9b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2017,7 +2017,7 @@ static void igb_diag_test(struct net_device *netdev, if (if_running) /* indicate we're in test mode */ - dev_close(netdev); + igb_close(netdev); else igb_reset(adapter); @@ -2050,7 +2050,7 @@ static void igb_diag_test(struct net_device *netdev, clear_bit(__IGB_TESTING, &adapter->state); if (if_running) - dev_open(netdev); + igb_open(netdev); } else { dev_info(&adapter->pdev->dev, "online testing starting\n"); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 3b56f809967c..834b1b6a9277 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -122,8 +122,8 @@ static void igb_setup_mrqc(struct igb_adapter *); static int igb_probe(struct pci_dev *, const struct pci_device_id *); static void igb_remove(struct pci_dev *pdev); static int igb_sw_init(struct igb_adapter *); -static int igb_open(struct net_device *); -static int igb_close(struct net_device *); +int igb_open(struct net_device *); +int igb_close(struct net_device *); static void igb_configure(struct igb_adapter *); static void igb_configure_tx(struct igb_adapter *); static void igb_configure_rx(struct igb_adapter *); @@ -3172,7 +3172,7 @@ err_setup_tx: return err; } -static int igb_open(struct net_device *netdev) +int igb_open(struct net_device *netdev) { return __igb_open(netdev, false); } @@ -3209,7 +3209,7 @@ static int __igb_close(struct net_device *netdev, bool suspending) return 0; } -static int igb_close(struct net_device *netdev) +int igb_close(struct net_device *netdev) { return __igb_close(netdev, false); } -- cgit v1.2.3 From 4fee7dab07bb2c7dfc3369e0f0e28e3fd4fc00c4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 23 Feb 2016 11:36:02 +0100 Subject: bnx2x: add a separate GENEVE Kconfig symbol When CONFIG_GENEVE is built as a loadable module, and bnx2x is built-in, we get this link error: drivers/net/built-in.o: In function `bnx2x_open': :(.text+0x33322): undefined reference to `geneve_get_rx_port' 
drivers/net/built-in.o: In function `bnx2x_sp_rtnl_task': :(.text+0x3e632): undefined reference to `geneve_get_rx_port' This avoids the problem by adding a separate Kconfig symbol named CONFIG_BNX2X_GENEVE that is only enabled when the code is reachable from the driver. This is the same trick that BNX2X does for VXLAN support, and is similar to how I40E handles both. Signed-off-by: Arnd Bergmann Fixes: 883ce97d25b0 ("bnx2x: Add Geneve inner-RSS support") Acked-By: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/Kconfig | 10 ++++++++++ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 14 +++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 19f7cd02e085..18042c2460bd 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -149,6 +149,16 @@ config BNX2X_VXLAN Say Y here if you want to enable hardware offload support for Virtual eXtensible Local Area Network (VXLAN) in the driver. +config BNX2X_GENEVE + bool "Generic Network Virtualization Encapsulation (GENEVE) support" + depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m) + ---help--- + This allows one to create GENEVE virtual interfaces that provide + Layer 2 Networks over Layer 3 Networks. GENEVE is often used + to tunnel virtual network infrastructure in virtualized environments. + Say Y here if you want to enable hardware offload support for + Generic Network Virtualization Encapsulation (GENEVE) in the driver. + config BGMAC tristate "BCMA bus GBit core support" depends on BCMA && BCMA_HOST_SOC diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 81fc51c4ec2b..5c95d0c3b076 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -59,7 +59,7 @@ #include #include #include -#if IS_ENABLED(CONFIG_GENEVE) +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) #include #endif #include "bnx2x.h" @@ -10078,7 +10078,7 @@ static void bnx2x_parity_recover(struct bnx2x *bp) } } -#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_GENEVE) +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE) static int bnx2x_udp_port_update(struct bnx2x *bp) { struct bnx2x_func_switch_update_params *switch_update_params; @@ -10201,7 +10201,7 @@ static void bnx2x_del_vxlan_port(struct net_device *netdev, } #endif -#if IS_ENABLED(CONFIG_GENEVE) +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) static void bnx2x_add_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { @@ -10327,7 +10327,7 @@ sp_rtnl_not_reset: &bp->sp_rtnl_state)) bnx2x_update_mng_version(bp); -#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_GENEVE) +#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE) if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT, &bp->sp_rtnl_state)) { if (bnx2x_udp_port_update(bp)) { @@ -10344,7 +10344,7 @@ sp_rtnl_not_reset: if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count) vxlan_get_rx_port(bp->dev); #endif -#if IS_ENABLED(CONFIG_GENEVE) +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count) geneve_get_rx_port(bp->dev); #endif @@ -12557,7 +12557,7 @@ static int bnx2x_open(struct net_device *dev) if (IS_PF(bp)) vxlan_get_rx_port(dev); #endif -#if IS_ENABLED(CONFIG_GENEVE) +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) if (IS_PF(bp)) geneve_get_rx_port(dev); #endif @@ -13078,7 +13078,7 @@ 
static const struct net_device_ops bnx2x_netdev_ops = { .ndo_add_vxlan_port = bnx2x_add_vxlan_port, .ndo_del_vxlan_port = bnx2x_del_vxlan_port, #endif -#if IS_ENABLED(CONFIG_GENEVE) +#if IS_ENABLED(CONFIG_BNX2X_GENEVE) .ndo_add_geneve_port = bnx2x_add_geneve_port, .ndo_del_geneve_port = bnx2x_del_geneve_port, #endif -- cgit v1.2.3 From 555d5b70f1597906dc2e31085f5e70b49d03a536 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Tue, 23 Feb 2016 13:59:43 +0100 Subject: ppp: clarify parsing of user supplied data in ppp_set_compress() * Split big conditional statement. * Check (data.length <= CCP_MAX_OPTION_LENGTH) only once. * Don't read ccp_option[1] if not initialised. Reading uninitialised ccp_option[1] was harmless, because this could only happen when data.length was 0 or 1. So even then, we couldn't pass the (ccp_option[1] < 2 || ccp_option[1] > data.length) test anyway. Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- drivers/net/ppp/ppp_generic.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index fc8ad001bc94..04f4eb34fa80 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2429,13 +2429,15 @@ ppp_set_compress(struct ppp *ppp, unsigned long arg) unsigned char ccp_option[CCP_MAX_OPTION_LENGTH]; err = -EFAULT; - if (copy_from_user(&data, (void __user *) arg, sizeof(data)) || - (data.length <= CCP_MAX_OPTION_LENGTH && - copy_from_user(ccp_option, (void __user *) data.ptr, data.length))) + if (copy_from_user(&data, (void __user *) arg, sizeof(data))) goto out; + if (data.length > CCP_MAX_OPTION_LENGTH) + goto out; + if (copy_from_user(ccp_option, (void __user *) data.ptr, data.length)) + goto out; + err = -EINVAL; - if (data.length > CCP_MAX_OPTION_LENGTH || - ccp_option[1] < 2 || ccp_option[1] > data.length) + if (data.length < 2 || ccp_option[1] < 2 || ccp_option[1] > data.length) goto out; cp = try_then_request_module( -- cgit v1.2.3 From 43154f6fd46bb3bcd363843c9e48f0b8b3a03a08 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:35 -0500 Subject: rtl8xxxu: Minor cleanup to rtl8xxxu_download_firmware() Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6aed923a709a..6b39e4d0abfb 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2100,7 +2100,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) /* 8051 enable */ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16 | SYS_FUNC_CPU_ENABLE); + val16 |= SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); /* MCU firmware download enable */ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); -- cgit v1.2.3 From ef1c0499316b892d700201edeeb013b60be96877 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:36 -0500 Subject: rtl8xxxu: rtl8xxxu_download_firmware(): Cosmetic cleanups Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6b39e4d0abfb..b6a264bc4463 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2105,15 +2105,18 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) /* MCU firmware download enable */ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); - rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_ENABLE); + val8 |= MCU_FW_DL_ENABLE; + rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8); /* 8051 reset */ val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); - rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32 & ~BIT(19)); + val32 &= ~BIT(19); + rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32); /* Reset firmware download checksum */ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); - rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8 | MCU_FW_DL_CSUM_REPORT); + val8 |= MCU_FW_DL_CSUM_REPORT; + rtl8xxxu_write8(priv, REG_MCU_FW_DL, val8); pages = priv->fw_size / RTL_FW_PAGE_SIZE; remainder = priv->fw_size % RTL_FW_PAGE_SIZE; @@ -2122,7 +2125,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) for (i = 0; i < pages; i++) { val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8; - rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i); + val8 |= i; + rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8); ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS, fwptr, RTL_FW_PAGE_SIZE); @@ -2136,7 +2140,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) if (remainder) { val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL + 2) & 0xF8; - rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8 | i); + val8 |= i; + rtl8xxxu_write8(priv, REG_MCU_FW_DL + 2, val8); ret = rtl8xxxu_writeN(priv, REG_FW_START_ADDRESS, fwptr, remainder); if (ret != remainder) { @@ -2149,8 +2154,8 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) fw_abort: /* MCU firmware download disable */ val16 = rtl8xxxu_read16(priv, REG_MCU_FW_DL); - rtl8xxxu_write16(priv, REG_MCU_FW_DL, - val16 & (~MCU_FW_DL_ENABLE & 0xff)); + val16 &= ~MCU_FW_DL_ENABLE; + rtl8xxxu_write16(priv, REG_MCU_FW_DL, val16); return ret; } -- cgit v1.2.3 From 216202ae20ea93585630e6a88486ea23200199cc Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:37 -0500 Subject: rtl8xxxu: If fw running in RAM, reset the 8051 before trying to download a new one Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index b6a264bc4463..e98e0535c14f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2089,7 +2089,7 @@ exit: static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) { int pages, remainder, i, ret; - u8 val8; + u8 val8, sys_func; u16 val16; u32 val32; u8 *fwptr; @@ -2103,6 +2103,23 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) val16 |= SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); + if (val8 & MCU_FW_RAM_SEL) { + pr_info("do the RAM reset\n"); + rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 &= ~BIT(3); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1); + sys_func &= ~BIT(2); + rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, 
sys_func); + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 |= BIT(3); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func |= BIT(2); + rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, sys_func); + } + /* MCU firmware download enable */ val8 = rtl8xxxu_read8(priv, REG_MCU_FW_DL); val8 |= MCU_FW_DL_ENABLE; -- cgit v1.2.3 From 0635f8cede159bb2205021583a483b0cd3246c74 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:38 -0500 Subject: rtl8xxxu: Add RQPN_[NE]PQ_SHIFT values Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 23208f79b97c..8b27a1c763fd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -352,6 +352,8 @@ #define REG_TXDMA_OFFSET_CHK 0x020c #define REG_TXDMA_STATUS 0x0210 #define REG_RQPN_NPQ 0x0214 +#define RQPN_NPQ_SHIFT 0 +#define RQPN_EPQ_SHIFT 16 /* 0x0280 ~ 0x02FF RXDMA Configuration */ #define REG_RXDMA_AGG_PG_TH 0x0280 -- cgit v1.2.3 From f076977537d9b6ac5d067d416b549aedd591da4f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:39 -0500 Subject: rtl8xxxu: Define SYS_CFG_SW_OFFLOAD_EN This is needed for chips with IOL support. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 8b27a1c763fd..7c795ce61aaa 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -213,6 +213,7 @@ #define SYS_CFG_PCIRSTB BIT(4) #define SYS_CFG_V15_VLD BIT(5) #define SYS_CFG_TRP_B15V_EN BIT(7) +#define SYS_CFG_SW_OFFLOAD_EN BIT(7) /* For chips with IOL support */ #define SYS_CFG_SIC_IDLE BIT(8) #define SYS_CFG_BD_MAC2 BIT(9) #define SYS_CFG_BD_MAC1 BIT(10) -- cgit v1.2.3 From cecfd3cb29f8f39b8859c3c189185df23b69e45c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:40 -0500 Subject: rtl8xxxu: Add REG_TX_REPORT_* defines Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 7c795ce61aaa..89547b68a98b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -456,6 +456,8 @@ #define REG_NEED_CPU_HANDLE 0x04e0 #define REG_PKT_LOSE_RPT 0x04e1 #define REG_PTCL_ERR_STATUS 0x04e2 +#define REG_TX_REPORT_CTRL 0x04ec +#define REG_TX_REPORT_TIME 0x04f0 #define REG_DUMMY 0x04fc /* 0x0500 ~ 0x05FF EDCA Configuration */ -- cgit v1.2.3 From 22229fcb487a161561cfcb08e6a4028d6343b2e7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:41 -0500 Subject: rtl8xxxu: Add more RCR bits Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 89547b68a98b..f7526027db53 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -568,9 +568,18 @@ #define RCR_ACCEPT_CTRL_FRAME BIT(12) #define RCR_ACCEPT_MGMT_FRAME BIT(13) #define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */ +#define RCR_UC_DATA_PKT_INT_ENABLE BIT(16) /* Enable unicast data packet + interrupt */ +#define RCR_BM_DATA_PKT_INT_ENABLE BIT(17) /* Enable broadcast data packet + interrupt */ +#define RCR_TIM_PARSER_ENABLE BIT(18) /* Enable RX beacon TIM parser*/ #define RCR_MFBEN BIT(22) -#define RCR_LSIGEN BIT(23) +#define RCR_LSIG_ENABLE BIT(23) /* Enable LSIG TXOP Protection + function. Search KEYCAM for + each rx packet to check if + LSIGEN bit is set. */ #define RCR_MULTI_BSSID_ENABLE BIT(24) /* Enable Multiple BssId */ +#define RCR_FORCE_ACK BIT(26) #define RCR_ACCEPT_BA_SSN BIT(27) /* Accept BA SSN */ #define RCR_APPEND_PHYSTAT BIT(28) #define RCR_APPEND_ICV BIT(29) -- cgit v1.2.3 From c1edece3c22be23cee3b28f74aecd1736ee816a1 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 3 Feb 2016 13:39:42 -0500 Subject: rtl8xxxu: Fix typo in Kconfig help text Fix spelling of "benefits". Signed-off-by: Tobias Klauser Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig index dd4d626aecbc..8f053c350227 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/Kconfig +++ b/drivers/net/wireless/realtek/rtl8xxxu/Kconfig @@ -13,7 +13,7 @@ config RTL8XXXU This driver is under development and has a limited feature set. In particular it does not yet support 40MHz channels and power management. However it should have a smaller - memory footprint than the vendor drivers and benetifs + memory footprint than the vendor drivers and benefits from the in kernel mac80211 stack. It can coexist with drivers from drivers/staging/rtl8723au, -- cgit v1.2.3 From 98e27cbd9453cd99403ee5f929a11a1000a3090b Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 3 Feb 2016 13:39:43 -0500 Subject: rtl8xxxu: Check return value of kmemdup() In rtl8xxxu_load_firmware() check the return value of kmemdup() and error out with -ENOMEM in case of NULL to prevent a NULL pointer dereference. Signed-off-by: Tobias Klauser Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e98e0535c14f..08d8e1fa44f0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2197,6 +2197,10 @@ static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name) } priv->fw_data = kmemdup(fw->data, fw->size, GFP_KERNEL); + if (!priv->fw_data) { + ret = -ENOMEM; + goto exit; + } priv->fw_size = fw->size - sizeof(struct rtl8xxxu_firmware_header); signature = le16_to_cpu(priv->fw_data->signature); -- cgit v1.2.3 From d48fe60e06f404acf1acb346a4bf2d7b0e1d4d86 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:44 -0500 Subject: rtl8xxxu: Implement rtl8xxxu_reset_8051() We need to reset the 8051 in order for it to launch the fw on the rtl8192eu. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 38 ++++++++++++++++-------- 1 file changed, 26 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 08d8e1fa44f0..7a571efbdc1c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2043,6 +2043,24 @@ exit: return ret; } +static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 sys_func; + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 &= ~BIT(3); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); + sys_func &= ~SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 |= BIT(3); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func |= SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); +} + static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; @@ -2067,6 +2085,12 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) val32 &= ~MCU_WINT_INIT_READY; rtl8xxxu_write32(priv, REG_MCU_FW_DL, val32); + /* + * Reset the 8051 in order for the firmware to start running, + * otherwise it won't come up on the 8192eu + */ + rtl8xxxu_reset_8051(priv); + /* Wait for firmware to become ready */ for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) { val32 = rtl8xxxu_read32(priv, REG_MCU_FW_DL); @@ -2089,7 +2113,7 @@ exit: static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) { int pages, remainder, i, ret; - u8 val8, sys_func; + u8 val8; u16 val16; u32 val32; u8 *fwptr; @@ -2107,17 +2131,7 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) if (val8 & MCU_FW_RAM_SEL) { pr_info("do the RAM reset\n"); rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); - val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); - val8 &= ~BIT(3); - rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); - sys_func = rtl8xxxu_read8(priv, REG_SYS_FUNC + 1); - sys_func &= ~BIT(2); - rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, sys_func); - val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); - val8 |= BIT(3); - rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); - sys_func |= BIT(2); - rtl8xxxu_write8(priv, REG_SYS_FUNC + 1, sys_func); + rtl8xxxu_reset_8051(priv); } /* MCU firmware download enable */ -- cgit v1.2.3 From af9e4d6d9d07857b5fb7711832e8cda745e336ae Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:45 -0500 Subject: rtl8xxxu: Add definitions for antenna selection registers for 8723BU Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index f7526027db53..997a85792f48 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -756,6 +756,10 @@ #define REG_FPGA1_RF_MODE 0x0900 #define REG_FPGA1_TX_INFO 0x090c +#define REG_DPDT_CTRL 0x092c /* 8723BU */ +#define REG_RFE_CTRL_ANTA_SRC 0x0930 /* 8723BU */ +#define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */ +#define REG_RFE_BUFFER 0x0944 /* 8723BU */ #define REG_CCK0_SYSTEM 0x0a00 #define CCK0_SIDEBAND BIT(4) -- cgit v1.2.3 From 
29c2139cd04ad23226c690221450b93bedaceea4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:46 -0500 Subject: rtl8xxxu: Add mbox extension register definitions for 8723B Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 997a85792f48..991b3664bd7e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -341,6 +341,11 @@ #define REG_BB_ACCEESS_CTRL 0x01e8 #define REG_BB_ACCESS_DATA 0x01ec +#define REG_HMBOX_EXT0_8723B 0x01f0 +#define REG_HMBOX_EXT1_8723B 0x01f4 +#define REG_HMBOX_EXT2_8723B 0x01f8 +#define REG_HMBOX_EXT3_8723B 0x01fc + /* 0x0200 ~ 0x027F TXDMA Configuration */ #define REG_RQPN 0x0200 #define RQPN_HI_PQ_SHIFT 0 -- cgit v1.2.3 From ce6f2e3669547ad18ea67761aa9cc32cb4407bf2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:47 -0500 Subject: rtl8xxxu: Add REG_OFDM0_RX_D_SYNC_PATH definitions Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 991b3664bd7e..d2995f8a3311 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -793,6 +793,9 @@ #define REG_OFDM0_ENERGY_CCA_THRES 0x0c4c +#define REG_OFDM0_RX_D_SYNC_PATH 0x0c40 +#define OFDM0_SYNC_PATH_NOTCH_FILTER BIT(1) + #define REG_OFDM0_XA_AGC_CORE1 0x0c50 #define REG_OFDM0_XA_AGC_CORE2 0x0c54 #define REG_OFDM0_XB_AGC_CORE1 0x0c58 -- cgit v1.2.3 From e5c447ccf8e568134847bb491663e26910a21d8a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:48 -0500 Subject: rtl8xxxu: Remove unused clutter for handling recursive calls to rtl8xxxu_init_device() This was a leftover from the vendor driver that was never utilized. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 12 +----------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 - 2 files changed, 1 insertion(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7a571efbdc1c..4a6939f1454f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4289,17 +4289,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0); - /* - * Not sure if we should get into this at all - */ - if (priv->iqk_initialized) { - rtl8xxxu_restore_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, - priv->bb_recovery_backup, - RTL8XXXU_BB_REGS); - } else { - rtl8723a_phy_iq_calibrate(priv); - priv->iqk_initialized = true; - } + rtl8723a_phy_iq_calibrate(priv); /* * This should enable thermal meter diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index f2a1bac6c8ec..f71fb8dd951c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -652,7 +652,6 @@ struct rtl8xxxu_priv { u32 bb_recovery_backup[RTL8XXXU_BB_REGS]; u32 rtlchip; u8 pi_enabled:1; - u8 iqk_initialized:1; u8 int_buf[USB_INTR_CONTENT_LENGTH]; }; -- cgit v1.2.3 From 38451998fb8a5f9c933b2c462a790d5633605ac8 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Wed, 3 Feb 2016 13:39:49 -0500 Subject: rtl8xxxu: Use REG_EFUSE_TEST register only on multifunctional devices rtl8192cu driver doesn't read/write the REG_EFUSE_TEST register. Neither does the rtl8188eu driver. Do it only for multifunc devices RTL8723AU/RTL8723BU. Signed-off-by: Jakub Sitnicki Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 9 ++++++--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4a6939f1454f..94d3d845d36e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1704,6 +1704,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->has_bluetooth = 1; if (val32 & MULTI_GPS_FUNC_EN) priv->has_gps = 1; + priv->is_multi_func = 1; } else if (val32 & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; @@ -1938,9 +1939,11 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) if (val16 & EEPROM_BOOT) priv->boot_eeprom = 1; - val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST); - val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT; - rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32); + if (priv->is_multi_func) { + val32 = rtl8xxxu_read32(priv, REG_EFUSE_TEST); + val32 = (val32 & ~EFUSE_SELECT_MASK) | EFUSE_WIFI_SELECT; + rtl8xxxu_write32(priv, REG_EFUSE_TEST, val32); + } dev_dbg(dev, "Booting from %s\n", priv->boot_eeprom ? 
"EEPROM" : "EFUSE"); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index f71fb8dd951c..bbd0f6b76b82 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -597,6 +597,7 @@ struct rtl8xxxu_priv { struct rtl8723au_idx ht20_max_power_offset[3]; u32 chip_cut:4; u32 rom_rev:4; + u32 is_multi_func:1; u32 has_wifi:1; u32 has_bluetooth:1; u32 enable_bluetooth:1; -- cgit v1.2.3 From 56e4374a5e08b6971069de4bb0824c2aa2149a8f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:50 -0500 Subject: rtl8xxxu: Fix cosmetics to quiet down checkpatch police Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 94d3d845d36e..ac4c211184df 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4415,7 +4415,7 @@ static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv, } static void rtl8xxxu_sw_scan_start(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, const u8* mac) + struct ieee80211_vif *vif, const u8 *mac) { struct rtl8xxxu_priv *priv = hw->priv; u8 val8; -- cgit v1.2.3 From 3bed4bfa51f2635ff21c73f83503e3e8ac233db2 Mon Sep 17 00:00:00 2001 From: Bruno Randolf Date: Wed, 3 Feb 2016 13:39:51 -0500 Subject: rtl8xxxu: Enable monitor mode by handling filters Monitor mode is enabled by handling the filter flags we get from mac80211 in rtl8xxxu_configure_filter() and writing them to the RCR register. By handling the filters, we can also stop setting the BSSID filters in the association event. 
Signed-off-by: Bruno Randolf Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 56 +++++++++++++++++++----- 1 file changed, 45 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ac4c211184df..b58a60f8ceb1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4199,7 +4199,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) * Configure initial WMAC settings */ val32 = RCR_ACCEPT_PHYS_MATCH | RCR_ACCEPT_MCAST | RCR_ACCEPT_BCAST | - /* RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON | */ RCR_ACCEPT_MGMT_FRAME | RCR_HTC_LOC_CTRL | RCR_APPEND_PHYSTAT | RCR_APPEND_ICV | RCR_APPEND_MIC; rtl8xxxu_write32(priv, REG_RCR, val32); @@ -4522,10 +4521,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rtl8xxxu_update_rate_mask(priv, ramask, sgi); - val32 = rtl8xxxu_read32(priv, REG_RCR); - val32 |= RCR_CHECK_BSSID_MATCH | RCR_CHECK_BSSID_BEACON; - rtl8xxxu_write32(priv, REG_RCR, val32); - /* Enable RX of data frames */ rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff); @@ -4539,11 +4534,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, h2c.joinbss.data = H2C_JOIN_BSS_CONNECT; } else { - val32 = rtl8xxxu_read32(priv, REG_RCR); - val32 &= ~(RCR_CHECK_BSSID_MATCH | - RCR_CHECK_BSSID_BEACON); - rtl8xxxu_write32(priv, REG_RCR, val32); - val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); val8 |= BEACON_DISABLE_TSF_UPDATE; rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); @@ -5318,11 +5308,55 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, unsigned int *total_flags, u64 multicast) { struct rtl8xxxu_priv *priv = hw->priv; + u32 rcr = rtl8xxxu_read32(priv, REG_RCR); dev_dbg(&priv->udev->dev, "%s: changed_flags %08x, total_flags %08x\n", __func__, changed_flags, *total_flags); - *total_flags &= (FIF_ALLMULTI | FIF_CONTROL | FIF_BCN_PRBRESP_PROMISC); + /* + * FIF_ALLMULTI ignored as all multicast frames are accepted (REG_MAR) + */ + + if (*total_flags & FIF_FCSFAIL) + rcr |= RCR_ACCEPT_CRC32; + else + rcr &= ~RCR_ACCEPT_CRC32; + + /* + * FIF_PLCPFAIL not supported? + */ + + if (*total_flags & FIF_BCN_PRBRESP_PROMISC) + rcr &= ~RCR_CHECK_BSSID_BEACON; + else + rcr |= RCR_CHECK_BSSID_BEACON; + + if (*total_flags & FIF_CONTROL) + rcr |= RCR_ACCEPT_CTRL_FRAME; + else + rcr &= ~RCR_ACCEPT_CTRL_FRAME; + + if (*total_flags & FIF_OTHER_BSS) { + rcr |= RCR_ACCEPT_AP; + rcr &= ~RCR_CHECK_BSSID_MATCH; + } else { + rcr &= ~RCR_ACCEPT_AP; + rcr |= RCR_CHECK_BSSID_MATCH; + } + + if (*total_flags & FIF_PSPOLL) + rcr |= RCR_ACCEPT_PM; + else + rcr &= ~RCR_ACCEPT_PM; + + /* + * FIF_PROBE_REQ ignored as probe requests always seem to be accepted + */ + + rtl8xxxu_write32(priv, REG_RCR, rcr); + + *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | \ + FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | FIF_PROBE_REQ); } static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts) -- cgit v1.2.3 From b40027ba8db1b2fe022bf42505cbf64cb0114eba Mon Sep 17 00:00:00 2001 From: Bruno Randolf Date: Wed, 3 Feb 2016 13:39:52 -0500 Subject: rtl8xxxu: Document REG_RXFLTMAP registers Add comments describing how REG_RXFLTMAP0, REG_RXFLTMAP1 and REG_RXFLTMAP2 work. 
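The mapping the new comment describes is simply "one bit per 802.11 subtype value". As a self-contained illustration (not driver code; the register name appears only in the output string), the beacon management subtype 0b1000 = 8 selects bit 8:

#include <stdio.h>

int main(void)
{
	unsigned int beacon_subtype = 0x8;                /* IEEE 802.11 mgmt subtype 8 */
	unsigned short rxfltmap0 = 1u << beacon_subtype;  /* one bit per subtype value */

	printf("REG_RXFLTMAP0 value to accept beacons: 0x%04x\n", rxfltmap0);
	return 0;
}

This prints 0x0100, matching the worked example in the comment added below.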
Signed-off-by: Bruno Randolf Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 26 +++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index d2995f8a3311..d089eddc920e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -569,9 +569,12 @@ (Rx beacon, probe rsp) */ #define RCR_ACCEPT_CRC32 BIT(8) /* Accept CRC32 error packet */ #define RCR_ACCEPT_ICV BIT(9) /* Accept ICV error packet */ -#define RCR_ACCEPT_DATA_FRAME BIT(11) -#define RCR_ACCEPT_CTRL_FRAME BIT(12) -#define RCR_ACCEPT_MGMT_FRAME BIT(13) +#define RCR_ACCEPT_DATA_FRAME BIT(11) /* Accept all data pkt or use + REG_RXFLTMAP2 */ +#define RCR_ACCEPT_CTRL_FRAME BIT(12) /* Accept all control pkt or use + REG_RXFLTMAP1 */ +#define RCR_ACCEPT_MGMT_FRAME BIT(13) /* Accept all mgmt pkt or use + REG_RXFLTMAP0 */ #define RCR_HTC_LOC_CTRL BIT(14) /* MFC<--HTC=1 MFC-->HTC=0 */ #define RCR_UC_DATA_PKT_INT_ENABLE BIT(16) /* Enable unicast data packet interrupt */ @@ -651,9 +654,20 @@ #define REG_LPNAV_CTRL 0x0694 #define REG_WKFMCAM_CMD 0x0698 #define REG_WKFMCAM_RWD 0x069c -#define REG_RXFLTMAP0 0x06a0 -#define REG_RXFLTMAP1 0x06a2 -#define REG_RXFLTMAP2 0x06a4 + +/* + * RX Filters: each bit corresponds to the numerical value of the subtype. + * If it is set the subtype frame type is passed. The filter is only used when + * the RCR_ACCEPT_DATA_FRAME, RCR_ACCEPT_CTRL_FRAME, RCR_ACCEPT_MGMT_FRAME bit + * in the RCR are low. + * + * Example: Beacon subtype is binary 1000 which is decimal 8 so we have to set + * bit 8 (0x100) in REG_RXFLTMAP0 to enable reception. 
+ */ +#define REG_RXFLTMAP0 0x06a0 /* Management frames */ +#define REG_RXFLTMAP1 0x06a2 /* Control frames */ +#define REG_RXFLTMAP2 0x06a4 /* Data frames */ + #define REG_BCN_PSR_RPT 0x06a8 #define REG_CALB32K_CTRL 0x06ac #define REG_PKT_MON_CTRL 0x06b4 -- cgit v1.2.3 From 124bc63c995d32b647560ca1951006f5954a53bb Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:53 -0500 Subject: rtl8xxxu: Remove duplicate USB ID Reported-by: Xose Vazquez Perez Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index b58a60f8ceb1..78683b56ae2c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5959,8 +5959,6 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0xcdab, 0x8010, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, -{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x317f, 0xff, 0xff, 0xff), - .driver_info = (unsigned long)&rtl8192cu_fops}, /* Netcore 8188RU */ {USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff7, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192cu_fops}, {USB_DEVICE_AND_INTERFACE_INFO(0x04f2, 0xaff9, 0xff, 0xff, 0xff), -- cgit v1.2.3 From 755bda116e8a724f0e6b6514fbeb6d761b091b1e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:54 -0500 Subject: rtl8xxxu: Fix 80 character per line issue Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 78683b56ae2c..9f89f62a9f0a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5355,8 +5355,9 @@ static void rtl8xxxu_configure_filter(struct ieee80211_hw *hw, rtl8xxxu_write32(priv, REG_RCR, rcr); - *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | \ - FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | FIF_PROBE_REQ); + *total_flags &= (FIF_ALLMULTI | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | + FIF_CONTROL | FIF_OTHER_BSS | FIF_PSPOLL | + FIF_PROBE_REQ); } static int rtl8xxxu_set_rts_threshold(struct ieee80211_hw *hw, u32 rts) -- cgit v1.2.3 From c85ea1156d787ef52bbb835bcf2d5117d16282e1 Mon Sep 17 00:00:00 2001 From: Bruno Randolf Date: Wed, 3 Feb 2016 13:39:55 -0500 Subject: rtl8xxxu: Enable data frame reception in rtl8xxxu_start mac80211 documentation says, the ieee80211_ops.start callback "must turn on frame reception (for possibly enabled monitor interfaces.)". If not a single monitor interface does not receive data frames. Similarly we should not change the data reception based on the association state. 
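With this change the division of labour is: .start turns reception on unconditionally, and .configure_filter (added in the earlier patch) is the only place that narrows what the hardware passes up. A hedged sketch of how the two callbacks sit in the driver's ieee80211_ops table (abridged; only the two relevant members shown, all others elided):

static const struct ieee80211_ops rtl8xxxu_ops = {
	.start            = rtl8xxxu_start,            /* opens REG_RXFLTMAP0/2 fully */
	.configure_filter = rtl8xxxu_configure_filter, /* maps FIF_* flags to RCR bits */
	/* ... remaining callbacks elided ... */
};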
Signed-off-by: Bruno Randolf Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 9f89f62a9f0a..1b7ff74fe9b3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4521,9 +4521,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rtl8xxxu_update_rate_mask(priv, ramask, sgi); - /* Enable RX of data frames */ - rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff); - rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff); rtl8723a_stop_tx_beacon(priv); @@ -4538,8 +4535,6 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, val8 |= BEACON_DISABLE_TSF_UPDATE; rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); - /* Disable RX of data frames */ - rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT; } h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT; @@ -5542,12 +5537,9 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) } exit: /* - * Disable all data frames - */ - rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0x0000); - /* - * Accept all mgmt frames + * Accept all data and mgmt frames */ + rtl8xxxu_write16(priv, REG_RXFLTMAP2, 0xffff); rtl8xxxu_write16(priv, REG_RXFLTMAP0, 0xffff); rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, 0x6954341e); -- cgit v1.2.3 From a26703f39d2fe8f06ca2327d1a3f011d85b56c7f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:56 -0500 Subject: rtl8xxxu: rtl8xxxu_set_linktype(): Be consistent aboutregister size REG_MSR Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 1b7ff74fe9b3..c8ba698badfe 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1599,9 +1599,9 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, enum nl80211_iftype linktype) { - u16 val8; + u8 val8; - val8 = rtl8xxxu_read16(priv, REG_MSR); + val8 = rtl8xxxu_read8(priv, REG_MSR); val8 &= ~MSR_LINKTYPE_MASK; switch (linktype) { -- cgit v1.2.3 From 53b381c4d72a58ca7df75074d75df9355b77fc99 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:57 -0500 Subject: rtl8xxxu: Set correct bit to reset MCU IO wrapper All chips, except for 8812 require BIT(0) for resetting the MCU IO wrapper. 8723b requires a more complicated reset sequence, so it will need a custom reset function. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c8ba698badfe..460145cca205 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2052,13 +2052,13 @@ static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv) u16 sys_func; val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); - val8 &= ~BIT(3); + val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); sys_func &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); - val8 |= BIT(3); + val8 |= BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); sys_func |= SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); -- cgit v1.2.3 From 796c554257b1a8a61b1112ac03bbbf3221c5a65f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:58 -0500 Subject: rtl8xxxu: Remove unused mgmt variable from rtl8xxxu_rx_complete() Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 460145cca205..12018362f2b1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5031,7 +5031,6 @@ static void rtl8xxxu_rx_complete(struct urb *urb) struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; struct rtl8723au_phy_stats *phy_stats; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - struct ieee80211_mgmt *mgmt; struct device *dev = &priv->udev->dev; __le32 *_rx_desc_le = (__le32 *)skb->data; u32 *_rx_desc = (u32 *)skb->data; @@ -5052,8 +5051,6 @@ static void rtl8xxxu_rx_complete(struct urb *urb) skb_pull(skb, drvinfo_sz + desc_shift); - mgmt = (struct ieee80211_mgmt *)skb->data; - memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); if (rx_desc->phy_stats) -- cgit v1.2.3 From a9ffa615aa4c661ce905f73f1b2d43e17ee55194 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:39:59 -0500 Subject: rtl8xxxu: Remove unused len/cnt variables from rtl8xxxu_rx_complete() Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 12018362f2b1..a6c8dabc87ba 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5034,13 +5034,11 @@ static void rtl8xxxu_rx_complete(struct urb *urb) struct device *dev = &priv->udev->dev; __le32 *_rx_desc_le = (__le32 *)skb->data; u32 *_rx_desc = (u32 *)skb->data; - int cnt, len, drvinfo_sz, desc_shift, i; + int drvinfo_sz, desc_shift, i; for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); - cnt = rx_desc->frag; - len = rx_desc->pktlen; drvinfo_sz = rx_desc->drvinfo_sz * 8; desc_shift = rx_desc->shift; skb_put(skb, urb->actual_length); -- cgit v1.2.3 From 
dc254ca41134d9bbc6227d133ef5e220f14529b5 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:40:00 -0500 Subject: rtl8xxxu: Do not mask RF registers to 6 bits Newer chips have RF registers beyond the original 0x3f address. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a6c8dabc87ba..af17ae7e6cdd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2536,8 +2536,6 @@ static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv, continue; } - reg &= 0x3f; - ret = rtl8xxxu_write_rfreg(priv, path, reg, val); if (ret) { dev_warn(&priv->udev->dev, -- cgit v1.2.3 From 4042e61735ce79c227a67a893d5738f297a96b9c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:40:01 -0500 Subject: rtl8xxxu: Make device_init kludge 8723au only Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index af17ae7e6cdd..fdeae3bada4d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4343,14 +4343,17 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); - /* - * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, - * but we need to fin root cause. - */ - val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); - if ((val32 & 0xff000000) != 0x83000000) { - val32 |= FPGA_RF_MODE_CCK; - rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + if (priv->rtlchip == 0x8723a) { + /* + * 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test, + * but we need to find root cause. + * This is 8723au only. + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + if ((val32 & 0xff000000) != 0x83000000) { + val32 |= FPGA_RF_MODE_CCK; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + } } val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL); -- cgit v1.2.3 From 5b22a111aa15bb8a010a0bec1da6a4b6690dd561 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Wed, 3 Feb 2016 13:40:02 -0500 Subject: rtl8xxxu: Add missing bit define for REG_APS_FSMCO Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index d089eddc920e..8f6c9c6c7c09 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -45,6 +45,7 @@ #define APS_FSMCO_ENABLE_POWERDOWN BIT(4) #define APS_FSMCO_MAC_ENABLE BIT(8) #define APS_FSMCO_MAC_OFF BIT(9) +#define APS_FSMCO_SW_LPS BIT(10) #define APS_FSMCO_HW_SUSPEND BIT(11) #define APS_FSMCO_PCIE BIT(12) #define APS_FSMCO_HW_POWERDOWN BIT(15) -- cgit v1.2.3 From 19c8f421a61947116898c9f8a28823b9d988df74 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Mon, 8 Feb 2016 00:00:30 +0100 Subject: brcmfmac: Increase nr of supported flowrings. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit New generation devices have firmware which has more than 256 flowrings. E.g. following debugging message comes from 14e4:4365 BCM4366: [ 194.606245] brcmfmac: brcmf_pcie_init_ringbuffers Nr of flowrings is 264 At various code places (related to flowrings) we were using u8 which could lead to storing wrong number or infinite loops when indexing with this type. This issue was quite easy to spot in brcmf_flowring_detach where it led to infinite loop e.g. on failed initialization. This patch switches code to proper types and increases the maximum number of supported flowrings to 512. Originally this change was sent in September 2015, but back it was causing a regression on BCM43602 resulting in: Unable to handle kernel NULL pointer dereference at virtual address ... The reason for this regression was missing update (s/u8/u16) of struct brcmf_flowring_ring. This problem was handled in 9f64df9 ("brcmfmac: Fix bug in flowring management."). Starting with that it's safe to apply this original patch as it doesn't cause a regression anymore. This patch fixes an infinite loop on BCM4366 which is supported since 4.4 so it makes sense to apply it to stable 4.4+. Cc: # 4.4+ Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/flowring.c | 38 ++++++++++++---------- .../broadcom/brcm80211/brcmfmac/flowring.h | 20 ++++++------ .../wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 11 +++++-- .../wireless/broadcom/brcm80211/brcmfmac/msgbuf.h | 2 +- 4 files changed, 41 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c index 2ca783fa50cf..7e269f9aa607 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c @@ -32,7 +32,7 @@ #define BRCMF_FLOWRING_LOW (BRCMF_FLOWRING_HIGH - 256) #define BRCMF_FLOWRING_INVALID_IFIDX 0xff -#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] + fifo + ifidx * 16) +#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16) #define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16) static const u8 brcmf_flowring_prio2fifo[] = { @@ -68,7 +68,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], u8 prio, u8 ifidx) { struct brcmf_flowring_hash *hash; - u8 hash_idx; + u16 hash_idx; u32 i; bool found; bool sta; @@ -88,6 +88,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], } hash_idx = sta ? 
BRCMF_FLOWRING_HASH_STA(fifo, ifidx) : BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx); + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); found = false; hash = flow->hash; for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) { @@ -98,6 +99,7 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], break; } hash_idx++; + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); } if (found) return hash[hash_idx].flowid; @@ -111,7 +113,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], { struct brcmf_flowring_ring *ring; struct brcmf_flowring_hash *hash; - u8 hash_idx; + u16 hash_idx; u32 i; bool found; u8 fifo; @@ -131,6 +133,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], } hash_idx = sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) : BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx); + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); found = false; hash = flow->hash; for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) { @@ -140,6 +143,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], break; } hash_idx++; + hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1); } if (found) { for (i = 0; i < flow->nrofrings; i++) { @@ -169,7 +173,7 @@ u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], } -u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid) +u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; @@ -179,7 +183,7 @@ u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid) } -static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid, +static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid, bool blocked) { struct brcmf_flowring_ring *ring; @@ -228,10 +232,10 @@ static void brcmf_flowring_block(struct brcmf_flowring *flow, u8 flowid, } -void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) +void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; - u8 hash_idx; + u16 hash_idx; struct sk_buff *skb; ring = flow->rings[flowid]; @@ -253,7 +257,7 @@ void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid) } -u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb) { struct brcmf_flowring_ring *ring; @@ -279,7 +283,7 @@ u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, } -struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid) +struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; struct sk_buff *skb; @@ -300,7 +304,7 @@ struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid) } -void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, +void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb) { struct brcmf_flowring_ring *ring; @@ -311,7 +315,7 @@ void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, } -u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid) +u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; @@ -326,7 +330,7 @@ u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid) } -void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid) +void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; @@ -340,10 +344,10 @@ void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid) } -u8 
brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid) +u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid) { struct brcmf_flowring_ring *ring; - u8 hash_idx; + u16 hash_idx; ring = flow->rings[flowid]; hash_idx = ring->hash_id; @@ -384,7 +388,7 @@ void brcmf_flowring_detach(struct brcmf_flowring *flow) struct brcmf_pub *drvr = bus_if->drvr; struct brcmf_flowring_tdls_entry *search; struct brcmf_flowring_tdls_entry *remove; - u8 flowid; + u16 flowid; for (flowid = 0; flowid < flow->nrofrings; flowid++) { if (flow->rings[flowid]) @@ -408,7 +412,7 @@ void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx, struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev); struct brcmf_pub *drvr = bus_if->drvr; u32 i; - u8 flowid; + u16 flowid; if (flow->addr_mode[ifidx] != addr_mode) { for (i = 0; i < ARRAY_SIZE(flow->hash); i++) { @@ -434,7 +438,7 @@ void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx, struct brcmf_flowring_tdls_entry *prev; struct brcmf_flowring_tdls_entry *search; u32 i; - u8 flowid; + u16 flowid; bool sta; sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h index 95fd1c9675d1..068e68d94999 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h @@ -16,7 +16,7 @@ #define BRCMFMAC_FLOWRING_H -#define BRCMF_FLOWRING_HASHSIZE 256 +#define BRCMF_FLOWRING_HASHSIZE 512 /* has to be 2^x */ #define BRCMF_FLOWRING_INVALID_ID 0xFFFFFFFF @@ -24,7 +24,7 @@ struct brcmf_flowring_hash { u8 mac[ETH_ALEN]; u8 fifo; u8 ifidx; - u8 flowid; + u16 flowid; }; enum ring_status { @@ -61,16 +61,16 @@ u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN], u8 prio, u8 ifidx); u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN], u8 prio, u8 ifidx); -void brcmf_flowring_delete(struct brcmf_flowring *flow, u8 flowid); -void brcmf_flowring_open(struct brcmf_flowring *flow, u8 flowid); -u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u8 flowid); -u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u8 flowid, +void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid); +void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid); +u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid); +u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb); -struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u8 flowid); -void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u8 flowid, +struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid); +void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid, struct sk_buff *skb); -u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u8 flowid); -u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u8 flowid); +u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid); +u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid); struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings); void brcmf_flowring_detach(struct brcmf_flowring *flow); void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index c2bdb91746cf..922966734a7f 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -677,7 +677,7 @@ static u32 brcmf_msgbuf_flowring_create(struct brcmf_msgbuf *msgbuf, int ifidx, } -static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u8 flowid) +static void brcmf_msgbuf_txflow(struct brcmf_msgbuf *msgbuf, u16 flowid) { struct brcmf_flowring *flow = msgbuf->flow; struct brcmf_commonring *commonring; @@ -1310,7 +1310,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev) } -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid) +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid) { struct brcmf_msgbuf *msgbuf = (struct brcmf_msgbuf *)drvr->proto->pd; struct msgbuf_tx_flowring_delete_req *delete; @@ -1415,6 +1415,13 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr) u32 count; if_msgbuf = drvr->bus_if->msgbuf; + + if (if_msgbuf->nrof_flowrings >= BRCMF_FLOWRING_HASHSIZE) { + brcmf_err("driver not configured for this many flowrings %d\n", + if_msgbuf->nrof_flowrings); + if_msgbuf->nrof_flowrings = BRCMF_FLOWRING_HASHSIZE - 1; + } + msgbuf = kzalloc(sizeof(*msgbuf), GFP_KERNEL); if (!msgbuf) goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h index 3d513e407e3d..ee6906a3c3f6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.h @@ -33,7 +33,7 @@ int brcmf_proto_msgbuf_rx_trigger(struct device *dev); -void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u8 flowid); +void brcmf_msgbuf_delete_flowring(struct brcmf_pub *drvr, u16 flowid); int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr); void brcmf_proto_msgbuf_detach(struct brcmf_pub *drvr); #else -- cgit v1.2.3 From 10fbc7cf031d7253770331417a4adc4179c979c5 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 8 Feb 2016 21:41:12 +0100 Subject: b43: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. Patch was generated using the following semantic patch: // @@ expression irq; @@ -synchronize_irq(irq); free_irq(irq, ...); // Signed-off-by: Lars-Peter Clausen Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/b43/main.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index c279211e49f9..72380af9dc52 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -4375,12 +4375,10 @@ redo: /* Synchronize and free the interrupt handlers. Unlock to avoid deadlocks. 
*/ orig_dev = dev; mutex_unlock(&wl->mutex); - if (b43_bus_host_is_sdio(dev->dev)) { + if (b43_bus_host_is_sdio(dev->dev)) b43_sdio_free_irq(dev); - } else { - synchronize_irq(dev->dev->irq); + else free_irq(dev->dev->irq, dev); - } mutex_lock(&wl->mutex); dev = wl->current_dev; if (!dev) -- cgit v1.2.3 From 400b43c5826964598e06bd3373ea4523ce749c5e Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Mon, 8 Feb 2016 21:41:13 +0100 Subject: rtlwifi: Remove unnecessary synchronize_irq() before free_irq() Calling synchronize_irq() right before free_irq() is quite useless. On one hand the IRQ can easily fire again before free_irq() is entered, on the other hand free_irq() itself calls synchronize_irq() internally (in a race condition free way), before any state associated with the IRQ is freed. Patch was generated using the following semantic patch: // @@ expression irq; @@ -synchronize_irq(irq); free_irq(irq, ...); // Signed-off-by: Lars-Peter Clausen Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/pci.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 7f471bff435c..140d2541562d 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -2392,7 +2392,6 @@ void rtl_pci_disconnect(struct pci_dev *pdev) rtlpriv->cfg->ops->deinit_sw_vars(hw); if (rtlpci->irq_alloc) { - synchronize_irq(rtlpci->pdev->irq); free_irq(rtlpci->pdev->irq, hw); rtlpci->irq_alloc = 0; } -- cgit v1.2.3 From ccf5fa4f33ba6488579775fd3193739e70378b90 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 9 Feb 2016 14:15:36 +0300 Subject: mwifiex: fix an indenting mistake We recently added an indent level here but missed this line. It needs another tab. Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index cc072142411a..efb19e2e1169 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -2452,7 +2452,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter) for (i = 0; i < 4; i++) { mwifiex_read_reg_byte(adapter, reg, &read_reg); memory_size |= (read_reg << (i * 8)); - reg++; + reg++; } } else { memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE; -- cgit v1.2.3 From 9696a159c3989e34aa9a6e658dcb89c8fcda0a80 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 11 Feb 2016 10:53:09 -0600 Subject: rtlwifi: Prepare for reworking 5G channels There are 3 drivers in this family that have 5G radios. Each of them defines local copies of the available channels. This patch adds the two arrays to the core driver. 
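(Illustrative sketch, not part of the patch: once channel5g[] and channel5g_80m[] are exported by the core as this change does, a driver can map a 5 GHz channel number to its table index with a simple scan. The helper name below is hypothetical and only assumes the extern declarations this patch adds to wifi.h.)

static int example_channel5g_index(u8 channel)
{
	int i;

	/* channel5g[] is the shared table exported by the core driver */
	for (i = 0; i < CHANNEL_MAX_NUMBER_5G; i++)
		if (channel5g[i] == channel)
			return i;

	return -1;	/* not a known 5 GHz channel */
}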
Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/core.c | 16 ++++++++++++++++ drivers/net/wireless/realtek/rtlwifi/wifi.h | 9 ++++----- 2 files changed, 20 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 4ae421ef30d9..02eba0e2afaa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -35,6 +35,22 @@ #include #include +u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { + 36, 38, 40, 42, 44, 46, 48, /* Band 1 */ + 52, 54, 56, 58, 60, 62, 64, /* Band 2 */ + 100, 102, 104, 106, 108, 110, 112, /* Band 3 */ + 116, 118, 120, 122, 124, 126, 128, /* Band 3 */ + 132, 134, 136, 138, 140, 142, 144, /* Band 3 */ + 149, 151, 153, 155, 157, 159, 161, /* Band 4 */ + 165, 167, 169, 171, 173, 175, 177 /* Band 4 */ +}; +EXPORT_SYMBOL(channel5g); + +u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { + 42, 58, 106, 122, 138, 155, 171 +}; +EXPORT_SYMBOL(channel5g_80m); + void rtl_addr_delay(u32 addr) { if (addr == 0xfe) diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 4544752a2ba8..e603819b2953 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -122,11 +122,6 @@ */ #define CHANNEL_MAX_NUMBER_5G_80M 7 #define CHANNEL_GROUP_MAX (3 + 9) /* ch1~3, 4~9, 10~14 = three groups */ -#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to - *"phy_GetChnlGroup8812A" and - * "Hal_ReadTxPowerInfo8812A" - */ -#define CHANNEL_MAX_NUMBER_5G_80M 7 #define MAX_PG_GROUP 13 #define CHANNEL_GROUP_MAX_2G 3 #define CHANNEL_GROUP_IDX_5GL 3 @@ -2904,6 +2899,10 @@ value to host byte ordering.*/ #define STBC_VHT_TEST_TX_ENABLE BIT(2) #define STBC_VHT_CAP_TX BIT(3) +extern u8 channel5g[CHANNEL_MAX_NUMBER_5G]; + +extern u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M]; + static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr) { return rtlpriv->io.read8_sync(rtlpriv, addr); -- cgit v1.2.3 From bb6fa826ba308ea4db55629ae383e66ded01bfdd Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 11 Feb 2016 10:53:10 -0600 Subject: rtlwifi: rtl8192de: Convert driver to use common 5G channels This driver defines its owh copy of the 5G channels. Change it to use the common definitions. 
Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8192de/phy.c | 23 ++++------------------ 1 file changed, 4 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c index bb06fe836fe7..7810fe87dca7 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c @@ -924,19 +924,11 @@ static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw, static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl) { - u8 channel_5g[59] = { - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, - 130, 132, 134, 136, 138, 140, 149, 151, - 153, 155, 157, 159, 161, 163, 165 - }; u8 place = chnl; if (chnl > 14) { - for (place = 14; place < sizeof(channel_5g); place++) { - if (channel_5g[place] == chnl) { + for (place = 14; place < sizeof(channel5g); place++) { + if (channel5g[place] == chnl) { place++; break; } @@ -2471,16 +2463,9 @@ static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel) { int i; - u8 channel_5g[45] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, - 60, 62, 64, 100, 102, 104, 106, 108, 110, 112, - 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, - 134, 136, 138, 140, 149, 151, 153, 155, 157, 159, - 161, 163, 165 - }; - for (i = 0; i < sizeof(channel_5g); i++) - if (channel == channel_5g[i]) + for (i = 0; i < sizeof(channel5g); i++) + if (channel == channel5g[i]) return true; return false; } -- cgit v1.2.3 From 2784b00aa316b430e9440087dd15c94557a27c4a Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 11 Feb 2016 10:53:11 -0600 Subject: rtlwifi: rtl8192ee: Convert driver to use new 5G channel tables The driver defines its own set of channel tables for the 5G band. With this change, it will use those of the core. Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c index 5f14308e8eb3..9fd3f1b6e4a8 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c @@ -2018,18 +2018,6 @@ static void _rtl92ee_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct rtl_efuse *efu = rtl_efuse(rtl_priv(hw)); struct txpower_info_2g pwr2g; struct txpower_info_5g pwr5g; - u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, - 56, 58, 60, 62, 64, 100, 102, 104, 106, - 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, - 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 168, 169, 171, - 173, 175, 177 - }; - u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { - 42, 58, 106, 122, 138, 155, 171 - }; u8 rf, idx; u8 i; -- cgit v1.2.3 From 0a44b22009d52f93b10f14bb4765915e8bafb6a5 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 11 Feb 2016 10:53:12 -0600 Subject: rtlwifi: rtl8821ae: Convert driver to use common 5G channel table There are several copies of the 5G channel tables in this driver. These are removed so that the tables in the core will be used. This change also removes a useless message of "Channel 163 in Group not found". 
The number of possible 5G channels was reduced from 54 to a better value of 49 during the conversion. Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- .../net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 18 --------------- .../net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 27 +++++----------------- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 +- 3 files changed, 7 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index bbb789f8990b..5da9bd0e5002 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -2786,14 +2786,6 @@ static void _rtl8812ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct txpower_info_2g pwrinfo24g; struct txpower_info_5g pwrinfo5g; - u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, - 56, 58, 60, 62, 64, 100, 102, 104, 106, - 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, - 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 168, 169, 171, 173, 175, 177}; - u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = {42, 58, 106, 122, 138, 155, 171}; u8 rf_path, index; u8 i; @@ -2872,16 +2864,6 @@ static void _rtl8821ae_read_txpower_info_from_hwpg(struct ieee80211_hw *hw, struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct txpower_info_2g pwrinfo24g; struct txpower_info_5g pwrinfo5g; - u8 channel5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, - 56, 58, 60, 62, 64, 100, 102, 104, 106, - 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, - 140, 142, 144, 149, 151, 153, 155, 157, - 159, 161, 163, 165, 167, 168, 169, 171, - 173, 175, 177}; - u8 channel5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { - 42, 58, 106, 122, 138, 155, 171}; u8 rf_path, index; u8 i; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 9b4d8a637915..74165b3eb362 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -1472,18 +1472,13 @@ static char _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, { struct rtl_priv *rtlpriv = rtl_priv(hw); char channel_index = -1; - u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, - 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, - 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 149, - 151, 153, 155, 157, 159, 161, 163, 165, 167, 168, 169, 171, - 173, 175, 177}; u8 i = 0; + if (band == BAND_ON_2_4G) channel_index = channel - 1; else if (band == BAND_ON_5G) { - for (i = 0; i < sizeof(channel_5g)/sizeof(u8); ++i) { - if (channel_5g[i] == channel) + for (i = 0; i < sizeof(channel5g)/sizeof(u8); ++i) { + if (channel5g[i] == channel) channel_index = i; } } else @@ -2240,13 +2235,6 @@ void rtl8821ae_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel) static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) { - u8 channel_5g[CHANNEL_MAX_NUMBER_5G] = { - 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, - 64, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, - 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, - 142, 144, 149, 151, 153, 155, 157, 159, 161, 163, 165, - 167, 168, 169, 171, 173, 175, 177 - }; u8 i = 0; bool 
in_24g = true; @@ -2257,7 +2245,7 @@ static bool _rtl8821ae_phy_get_chnl_index(u8 channel, u8 *chnl_index) in_24g = false; for (i = 0; i < CHANNEL_MAX_NUMBER_5G; ++i) { - if (channel_5g[i] == channel) { + if (channel5g[i] == channel) { *chnl_index = i; return in_24g; } @@ -2728,13 +2716,10 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path, rate <= DESC_RATEVHT2SS_MCS9)) txpower += rtlefuse->txpwr_5g_bw40diff[path][TX_2S]; } else if (bandwidth == HT_CHANNEL_WIDTH_80) { - u8 channel_5g_80m[CHANNEL_MAX_NUMBER_5G_80M] = { - 42, 58, 106, 122, 138, 155, 171 - }; u8 i; - for (i = 0; i < sizeof(channel_5g_80m) / sizeof(u8); ++i) - if (channel_5g_80m[i] == channel) + for (i = 0; i < sizeof(channel5g_80m) / sizeof(u8); ++i) + if (channel5g_80m[i] == channel) index = i; if ((DESC_RATEMCS0 <= rate && rate <= DESC_RATEMCS15) || diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index e603819b2953..9e3cdd732eca 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -116,7 +116,7 @@ #define CHANNEL_MAX_NUMBER (14 + 24 + 21) /* 14 is the max channel no */ #define CHANNEL_MAX_NUMBER_2G 14 -#define CHANNEL_MAX_NUMBER_5G 54 /* Please refer to +#define CHANNEL_MAX_NUMBER_5G 49 /* Please refer to *"phy_GetChnlGroup8812A" and * "Hal_ReadTxPowerInfo8812A" */ -- cgit v1.2.3 From 4fb37186f3b2b1f5d1a8f3e6b26b719bf2df6ee3 Mon Sep 17 00:00:00 2001 From: Sudip Mukherjee Date: Tue, 2 Feb 2016 12:44:40 +0530 Subject: ath10k: remove impossible code len has been initialized with a value of 0 and buf_len with 4096. There is no way that this condition (len > buf_len) can be true now. Signed-off-by: Sudip Mukherjee Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 8d4148a96af8..3dd75a2daf08 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2182,9 +2182,6 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file, mutex_lock(&ar->conf_mutex); - if (len > buf_len) - len = buf_len; - len += scnprintf(buf + len, buf_len - len, "firmware-N.bin\t\t%08x\n", crc32_le(0, ar->firmware->data, ar->firmware->size)); -- cgit v1.2.3 From 74135f599f9a2d0c54674640aa7d2423ab8ca98c Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 3 Feb 2016 21:07:42 +0530 Subject: ath10k: fix updating peer stats rx duration We are not updating peer stats rx_duration periodically unless the user one polls for fw_stats, this is because we discard the update event since pdev list is empty. 
Fix this by updating rx duration periodically irrepective of checks for pdev list (irrespective of ping-pong response) Fixes: 856e7c3 ("ath10k: add debugfs support for Per STA total rx duration") Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 3dd75a2daf08..848a0ddca722 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -348,6 +348,9 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) */ peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map); + if (peer_stats_svc) + ath10k_sta_update_rx_duration(ar, &stats.peers); + if (ar->debug.fw_stats_done && !peer_stats_svc) { ath10k_warn(ar, "received unsolicited stats update event\n"); goto free; @@ -384,9 +387,6 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) goto free; } - if (peer_stats_svc) - ath10k_sta_update_rx_duration(ar, &stats.peers); - list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers); list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs); } -- cgit v1.2.3 From 64f87d3616a01c53262c6e0e948d62df15923f1c Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 23 Feb 2016 18:02:55 +0100 Subject: vxlan: consolidate GBP handling even more Now when the packet is scrubbed early, skb->mark can be set in the GBP handling code. Signed-off-by: Jiri Benc Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 909f7931c297..656a303c0ac8 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1164,6 +1164,7 @@ out: } static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, + struct sk_buff *skb, u32 vxflags, struct vxlan_metadata *md, struct metadata_dst *tun_dst) { @@ -1183,6 +1184,9 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, if (gbp->policy_applied) md->gbp |= VXLAN_GBP_POLICY_APPLIED; + /* In flow-based mode, GBP is carried in dst_metadata */ + if (!(vxflags & VXLAN_F_COLLECT_METADATA)) + skb->mark = md->gbp; out: unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } @@ -1228,9 +1232,6 @@ static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, goto drop; skb_reset_network_header(skb); - /* In flow-based mode, GBP is carried in dst_metadata */ - if (!(vs->flags & VXLAN_F_COLLECT_METADATA)) - skb->mark = md->gbp; if (oip6) err = IP6_ECN_decapsulate(oip6, skb); @@ -1329,7 +1330,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) if (!vxlan_remcsum(&unparsed, skb, vs->flags)) goto drop; if (vs->flags & VXLAN_F_GBP) - vxlan_parse_gbp_hdr(&unparsed, md, tun_dst); + vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md, tun_dst); if (unparsed.vx_flags || unparsed.vx_vni) { /* If there are any unprocessed flags remaining treat -- cgit v1.2.3 From 1ab016e237e7d0734fb7adbbfe6d4cacb3520421 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 23 Feb 2016 18:02:56 +0100 Subject: vxlan: move inner L2 header processing to a separate function This code will be different for VXLAN-GPE, so move it to a separate function. It will also make the rx path less spaghetti-like. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 49 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 33 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 656a303c0ac8..68a8f9f43e2a 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1191,15 +1191,11 @@ out: unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS; } -static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, - struct sk_buff *skb, struct vxlan_metadata *md, - struct metadata_dst *tun_dst) +static bool vxlan_set_mac(struct vxlan_dev *vxlan, + struct vxlan_sock *vs, + struct sk_buff *skb) { - struct iphdr *oip = NULL; - struct ipv6hdr *oip6 = NULL; - struct pcpu_sw_netstats *stats; union vxlan_addr saddr; - int err = 0; skb_reset_mac_header(skb); skb->protocol = eth_type_trans(skb, vxlan->dev); @@ -1207,30 +1203,51 @@ static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, /* Ignore packet loops (and multicast echo) */ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) - goto drop; + return false; /* Get data from the outer IP header */ if (vxlan_get_sk_family(vs) == AF_INET) { - oip = ip_hdr(skb); - saddr.sin.sin_addr.s_addr = oip->saddr; + saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; saddr.sa.sa_family = AF_INET; #if IS_ENABLED(CONFIG_IPV6) } else { - oip6 = ipv6_hdr(skb); - saddr.sin6.sin6_addr = oip6->saddr; + saddr.sin6.sin6_addr = ipv6_hdr(skb)->saddr; saddr.sa.sa_family = AF_INET6; #endif } + if ((vxlan->flags & VXLAN_F_LEARN) && + vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) + return false; + + return true; +} + +static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, + struct sk_buff *skb, struct vxlan_metadata *md, + struct metadata_dst *tun_dst) +{ + struct iphdr *oip = NULL; + struct ipv6hdr *oip6 = NULL; + struct pcpu_sw_netstats *stats; + int err = 0; + + if (!vxlan_set_mac(vxlan, vs, skb)) + goto drop; + + /* Get data from the outer IP header */ + if (vxlan_get_sk_family(vs) == AF_INET) + oip = ip_hdr(skb); +#if IS_ENABLED(CONFIG_IPV6) + else + oip6 = ipv6_hdr(skb); +#endif + if (tun_dst) { skb_dst_set(skb, (struct dst_entry *)tun_dst); tun_dst = NULL; } - if ((vxlan->flags & VXLAN_F_LEARN) && - vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source)) - goto drop; - skb_reset_network_header(skb); if (oip6) -- cgit v1.2.3 From 760c68054e9ed1c6e43b06f916f6efc2c8a1adcc Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 23 Feb 2016 18:02:57 +0100 Subject: vxlan: move ECN decapsulation to a separate function It simplifies the vxlan_rcv function. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 62 ++++++++++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 68a8f9f43e2a..382535bc9e59 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1205,7 +1205,7 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr)) return false; - /* Get data from the outer IP header */ + /* Get address from the outer IP header */ if (vxlan_get_sk_family(vs) == AF_INET) { saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr; saddr.sa.sa_family = AF_INET; @@ -1223,52 +1223,52 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan, return true; } +static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, + struct sk_buff *skb) +{ + int err = 0; + + if (vxlan_get_sk_family(vs) == AF_INET) + err = IP_ECN_decapsulate(oiph, skb); +#if IS_ENABLED(CONFIG_IPV6) + else + err = IP6_ECN_decapsulate(oiph, skb); +#endif + + if (unlikely(err) && log_ecn_error) { + if (vxlan_get_sk_family(vs) == AF_INET) + net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", + &((struct iphdr *)oiph)->saddr, + ((struct iphdr *)oiph)->tos); + else + net_info_ratelimited("non-ECT from %pI6\n", + &((struct ipv6hdr *)oiph)->saddr); + } + return err <= 1; +} + static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, struct sk_buff *skb, struct vxlan_metadata *md, struct metadata_dst *tun_dst) { - struct iphdr *oip = NULL; - struct ipv6hdr *oip6 = NULL; struct pcpu_sw_netstats *stats; - int err = 0; + void *oiph; if (!vxlan_set_mac(vxlan, vs, skb)) goto drop; - /* Get data from the outer IP header */ - if (vxlan_get_sk_family(vs) == AF_INET) - oip = ip_hdr(skb); -#if IS_ENABLED(CONFIG_IPV6) - else - oip6 = ipv6_hdr(skb); -#endif - if (tun_dst) { skb_dst_set(skb, (struct dst_entry *)tun_dst); tun_dst = NULL; } + oiph = skb_network_header(skb); skb_reset_network_header(skb); - if (oip6) - err = IP6_ECN_decapsulate(oip6, skb); - if (oip) - err = IP_ECN_decapsulate(oip, skb); - - if (unlikely(err)) { - if (log_ecn_error) { - if (oip6) - net_info_ratelimited("non-ECT from %pI6\n", - &oip6->saddr); - if (oip) - net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n", - &oip->saddr, oip->tos); - } - if (err > 1) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; - goto drop; - } + if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { + ++vxlan->dev->stats.rx_frame_errors; + ++vxlan->dev->stats.rx_errors; + goto drop; } stats = this_cpu_ptr(vxlan->dev->tstats); -- cgit v1.2.3 From f2d1968ec85e85def98fdea0cf325851433bb60a Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 23 Feb 2016 18:02:58 +0100 Subject: vxlan: consolidate rx handling to a single function Now when both vxlan_udp_encap_recv and vxlan_rcv are much shorter, combine them into a single function. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 72 +++++++++++++++++++++-------------------------------- 1 file changed, 28 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 382535bc9e59..cfd6deb9f090 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1247,56 +1247,17 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, return err <= 1; } -static void vxlan_rcv(struct vxlan_dev *vxlan, struct vxlan_sock *vs, - struct sk_buff *skb, struct vxlan_metadata *md, - struct metadata_dst *tun_dst) -{ - struct pcpu_sw_netstats *stats; - void *oiph; - - if (!vxlan_set_mac(vxlan, vs, skb)) - goto drop; - - if (tun_dst) { - skb_dst_set(skb, (struct dst_entry *)tun_dst); - tun_dst = NULL; - } - - oiph = skb_network_header(skb); - skb_reset_network_header(skb); - - if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { - ++vxlan->dev->stats.rx_frame_errors; - ++vxlan->dev->stats.rx_errors; - goto drop; - } - - stats = this_cpu_ptr(vxlan->dev->tstats); - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); - - gro_cells_receive(&vxlan->gro_cells, skb); - - return; -drop: - if (tun_dst) - dst_release((struct dst_entry *)tun_dst); - - /* Consume bad packet */ - kfree_skb(skb); -} - /* Callback from net/ipv4/udp.c to receive packets */ -static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) +static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) { struct metadata_dst *tun_dst = NULL; + struct pcpu_sw_netstats *stats; struct vxlan_dev *vxlan; struct vxlan_sock *vs; struct vxlanhdr unparsed; struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; + void *oiph; /* Need Vxlan and inner Ethernet header to be present */ if (!pskb_may_pull(skb, VXLAN_HLEN)) @@ -1361,7 +1322,30 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb) goto drop; } - vxlan_rcv(vxlan, vs, skb, md, tun_dst); + if (!vxlan_set_mac(vxlan, vs, skb)) + goto drop; + + if (tun_dst) { + skb_dst_set(skb, (struct dst_entry *)tun_dst); + tun_dst = NULL; + } + + oiph = skb_network_header(skb); + skb_reset_network_header(skb); + + if (!vxlan_ecn_decapsulate(vs, oiph, skb)) { + ++vxlan->dev->stats.rx_frame_errors; + ++vxlan->dev->stats.rx_errors; + goto drop; + } + + stats = this_cpu_ptr(vxlan->dev->tstats); + u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += skb->len; + u64_stats_update_end(&stats->syncp); + + gro_cells_receive(&vxlan->gro_cells, skb); return 0; drop: @@ -2666,7 +2650,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, /* Mark socket as an encapsulation socket. */ tunnel_cfg.sk_user_data = vs; tunnel_cfg.encap_type = 1; - tunnel_cfg.encap_rcv = vxlan_udp_encap_recv; + tunnel_cfg.encap_rcv = vxlan_rcv; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); -- cgit v1.2.3 From 10a5af238cd29b7e43af0dc0690ae9baa0650c36 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Tue, 23 Feb 2016 18:02:59 +0100 Subject: vxlan: simplify metadata_dst usage in vxlan_rcv Now when the packet is scrubbed early, the metadata_dst can be assigned to the skb as soon as it is allocated. This simplifies the error cleanup path, as the dst will be freed by kfree_skb. It is also not necessary to pass it as a parameter to functions anymore. Signed-off-by: Jiri Benc Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index cfd6deb9f090..775ddb48388d 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1165,16 +1165,17 @@ out: static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed, struct sk_buff *skb, u32 vxflags, - struct vxlan_metadata *md, - struct metadata_dst *tun_dst) + struct vxlan_metadata *md) { struct vxlanhdr_gbp *gbp = (struct vxlanhdr_gbp *)unparsed; + struct metadata_dst *tun_dst; if (!(unparsed->vx_flags & VXLAN_HF_GBP)) goto out; md->gbp = ntohs(gbp->policy_id); + tun_dst = (struct metadata_dst *)skb_dst(skb); if (tun_dst) tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; @@ -1250,7 +1251,6 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph, /* Callback from net/ipv4/udp.c to receive packets */ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) { - struct metadata_dst *tun_dst = NULL; struct pcpu_sw_netstats *stats; struct vxlan_dev *vxlan; struct vxlan_sock *vs; @@ -1289,6 +1289,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (vxlan_collect_metadata(vs)) { __be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni); + struct metadata_dst *tun_dst; tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY, vxlan_vni_to_tun_id(vni), sizeof(*md)); @@ -1297,6 +1298,8 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) goto drop; md = ip_tunnel_info_opts(&tun_dst->u.tun_info); + + skb_dst_set(skb, (struct dst_entry *)tun_dst); } else { memset(md, 0, sizeof(*md)); } @@ -1308,7 +1311,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vxlan_remcsum(&unparsed, skb, vs->flags)) goto drop; if (vs->flags & VXLAN_F_GBP) - vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md, tun_dst); + vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md); if (unparsed.vx_flags || unparsed.vx_vni) { /* If there are any unprocessed flags remaining treat @@ -1325,11 +1328,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) if (!vxlan_set_mac(vxlan, vs, skb)) goto drop; - if (tun_dst) { - skb_dst_set(skb, (struct dst_entry *)tun_dst); - tun_dst = NULL; - } - oiph = skb_network_header(skb); skb_reset_network_header(skb); @@ -1349,9 +1347,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb) return 0; drop: - if (tun_dst) - dst_release((struct dst_entry *)tun_dst); - /* Consume bad packet */ kfree_skb(skb); return 0; -- cgit v1.2.3 From ceff5eff3a4c690764d11f2d759c27a0d2dea732 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Tue, 23 Feb 2016 12:13:55 -0500 Subject: net: dsa: mv88e6xxx: implement port_vlan_dump Remove the port_pvid_get and vlan_getnext functions in favor of a simpler mv88e6xxx_port_vlan_dump function. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6171.c | 3 +- drivers/net/dsa/mv88e6352.c | 3 +- drivers/net/dsa/mv88e6xxx.c | 110 ++++++++++++++++++++------------------------ drivers/net/dsa/mv88e6xxx.h | 6 +-- 4 files changed, 56 insertions(+), 66 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index 6e18213b9c04..dd1ebaf48077 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -106,11 +106,10 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .port_join_bridge = mv88e6xxx_port_bridge_join, .port_leave_bridge = mv88e6xxx_port_bridge_leave, .port_stp_update = mv88e6xxx_port_stp_update, - .port_pvid_get = mv88e6xxx_port_pvid_get, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, - .vlan_getnext = mv88e6xxx_vlan_getnext, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index a47f52f44b0d..bbca36ac4f77 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -327,11 +327,10 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .port_join_bridge = mv88e6xxx_port_bridge_join, .port_leave_bridge = mv88e6xxx_port_bridge_leave, .port_stp_update = mv88e6xxx_port_stp_update, - .port_pvid_get = mv88e6xxx_port_pvid_get, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, - .vlan_getnext = mv88e6xxx_vlan_getnext, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, .port_fdb_add = mv88e6xxx_port_fdb_add, .port_fdb_del = mv88e6xxx_port_fdb_del, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 685dcb047979..d98dc635b00b 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1151,19 +1151,6 @@ static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) return 0; } -int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) -{ - int ret; - - ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN); - if (ret < 0) - return ret; - - *pvid = ret & PORT_DEFAULT_VLAN_MASK; - - return 0; -} - static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid) { return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN, @@ -1306,6 +1293,57 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, return 0; } +int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct mv88e6xxx_vtu_stu_entry next; + u16 pvid; + int err; + + mutex_lock(&ps->smi_mutex); + + err = _mv88e6xxx_port_pvid_get(ds, port, &pvid); + if (err) + goto unlock; + + err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK); + if (err) + goto unlock; + + do { + err = _mv88e6xxx_vtu_getnext(ds, &next); + if (err) + break; + + if (!next.valid) + break; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + continue; + + /* reinit and dump this VLAN obj */ + vlan->vid_begin = vlan->vid_end = next.vid; + vlan->flags = 0; + + if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) + vlan->flags |= BRIDGE_VLAN_INFO_UNTAGGED; + + if (next.vid == pvid) + vlan->flags |= BRIDGE_VLAN_INFO_PVID; + + 
err = cb(&vlan->obj); + if (err) + break; + } while (next.vid < GLOBAL_VTU_VID_MASK); + +unlock: + mutex_unlock(&ps->smi_mutex); + + return err; +} + static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, struct mv88e6xxx_vtu_stu_entry *entry) { @@ -1675,52 +1713,6 @@ unlock: return err; } -int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, - unsigned long *ports, unsigned long *untagged) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - struct mv88e6xxx_vtu_stu_entry next; - int port; - int err; - - if (*vid == 4095) - return -ENOENT; - - mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_vtu_vid_write(ds, *vid); - if (err) - goto unlock; - - err = _mv88e6xxx_vtu_getnext(ds, &next); -unlock: - mutex_unlock(&ps->smi_mutex); - - if (err) - return err; - - if (!next.valid) - return -ENOENT; - - *vid = next.vid; - - for (port = 0; port < ps->num_ports; ++port) { - clear_bit(port, ports); - clear_bit(port, untagged); - - if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) - continue; - - if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED || - next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) - set_bit(port, ports); - - if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED) - set_bit(port, untagged); - } - - return 0; -} - static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds, const unsigned char *addr) { diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 260b4918e427..6a30bda63a2f 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -494,9 +494,9 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, struct switchdev_trans *trans); int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan); -int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *vid); -int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid, - unsigned long *ports, unsigned long *untagged); +int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port, + struct switchdev_obj_port_vlan *vlan, + int (*cb)(struct switchdev_obj *obj)); int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans); -- cgit v1.2.3 From 407353ec85ccc775ce342a6c5a1202f34ed827cc Mon Sep 17 00:00:00 2001 From: Clemens Gruber Date: Tue, 23 Feb 2016 20:16:58 +0100 Subject: phy: marvell: Fix 88E1510 initialization A bug was introduced in the merge commit b633353115e3 ("Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net") The generic marvell_config_init (and therefore marvell_of_reg_init) is not called anymore for the Marvell 88E1510 (in net-next). This patch calls marvell_config_init and moves the specific init function for the 88E1510 below the marvell_config_init function to avoid adding a function predeclaration. Signed-off-by: Clemens Gruber Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell.c | 70 +++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 9fb9d80ae419..280e8795b463 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -443,41 +443,6 @@ static int m88e1318_config_aneg(struct phy_device *phydev) return m88e1121_config_aneg(phydev); } -static int m88e1510_config_init(struct phy_device *phydev) -{ - int err; - int temp; - - /* SGMII-to-Copper mode initialization */ - if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - /* Select page 18 */ - err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18); - if (err < 0) - return err; - - /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */ - temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1); - temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK; - temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII; - err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); - if (err < 0) - return err; - - /* PHY reset is necessary after changing MODE[2:0] */ - temp |= MII_88E1510_GEN_CTRL_REG_1_RESET; - err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); - if (err < 0) - return err; - - /* Reset page selection */ - err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0); - if (err < 0) - return err; - } - - return 0; -} - static int m88e1510_config_aneg(struct phy_device *phydev) { int err; @@ -671,6 +636,41 @@ static int m88e1111_config_init(struct phy_device *phydev) return phy_write(phydev, MII_BMCR, BMCR_RESET); } +static int m88e1510_config_init(struct phy_device *phydev) +{ + int err; + int temp; + + /* SGMII-to-Copper mode initialization */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + /* Select page 18 */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 18); + if (err < 0) + return err; + + /* In reg 20, write MODE[2:0] = 0x1 (SGMII to Copper) */ + temp = phy_read(phydev, MII_88E1510_GEN_CTRL_REG_1); + temp &= ~MII_88E1510_GEN_CTRL_REG_1_MODE_MASK; + temp |= MII_88E1510_GEN_CTRL_REG_1_MODE_SGMII; + err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + if (err < 0) + return err; + + /* PHY reset is necessary after changing MODE[2:0] */ + temp |= MII_88E1510_GEN_CTRL_REG_1_RESET; + err = phy_write(phydev, MII_88E1510_GEN_CTRL_REG_1, temp); + if (err < 0) + return err; + + /* Reset page selection */ + err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0); + if (err < 0) + return err; + } + + return marvell_config_init(phydev); +} + static int m88e1118_config_aneg(struct phy_device *phydev) { int err; -- cgit v1.2.3 From c35ec779663b030ad3a03147be23146274026ae3 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 24 Feb 2016 17:26:55 +0800 Subject: gianfar_ptp: replace get_of_u32 with of_property_read_u32 Replace get_of_u32 with standard helper function of_property_read_u32 since the latter can process cpu endianness. Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/gianfar_ptp.c | 33 +++++++++++----------------- 1 file changed, 13 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c index b40fba929d65..57798814160d 100644 --- a/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -422,19 +422,6 @@ static struct ptp_clock_info ptp_gianfar_caps = { .enable = ptp_gianfar_enable, }; -/* OF device tree */ - -static int get_of_u32(struct device_node *node, char *str, u32 *val) -{ - int plen; - const u32 *prop = of_get_property(node, str, &plen); - - if (!prop || plen != sizeof(*prop)) - return -1; - *val = *prop; - return 0; -} - static int gianfar_ptp_probe(struct platform_device *dev) { struct device_node *node = dev->dev.of_node; @@ -452,15 +439,21 @@ static int gianfar_ptp_probe(struct platform_device *dev) etsects->caps = ptp_gianfar_caps; - if (get_of_u32(node, "fsl,cksel", &etsects->cksel)) + if (of_property_read_u32(node, "fsl,cksel", &etsects->cksel)) etsects->cksel = DEFAULT_CKSEL; - if (get_of_u32(node, "fsl,tclk-period", &etsects->tclk_period) || - get_of_u32(node, "fsl,tmr-prsc", &etsects->tmr_prsc) || - get_of_u32(node, "fsl,tmr-add", &etsects->tmr_add) || - get_of_u32(node, "fsl,tmr-fiper1", &etsects->tmr_fiper1) || - get_of_u32(node, "fsl,tmr-fiper2", &etsects->tmr_fiper2) || - get_of_u32(node, "fsl,max-adj", &etsects->caps.max_adj)) { + if (of_property_read_u32(node, + "fsl,tclk-period", &etsects->tclk_period) || + of_property_read_u32(node, + "fsl,tmr-prsc", &etsects->tmr_prsc) || + of_property_read_u32(node, + "fsl,tmr-add", &etsects->tmr_add) || + of_property_read_u32(node, + "fsl,tmr-fiper1", &etsects->tmr_fiper1) || + of_property_read_u32(node, + "fsl,tmr-fiper2", &etsects->tmr_fiper2) || + of_property_read_u32(node, + "fsl,max-adj", &etsects->caps.max_adj)) { pr_err("device tree node missing required elements\n"); goto no_node; } -- cgit v1.2.3 From f54af12f4348b23d78f5186854d23e47795d21cb Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 24 Feb 2016 17:26:56 +0800 Subject: gianfar: fix endianness for hardware timestamp Fix endianness for the 64-bit hardware timestamp value with be64_to_cpu to support both PowerPC platforms and ARM platforms. Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/gianfar.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 1e1157fa77f4..a01e5a32b631 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -2706,7 +2706,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) ~0x7UL); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - shhwtstamps.hwtstamp = ns_to_ktime(*ns); + shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN); skb_tstamp_tx(skb, &shhwtstamps); gfar_clear_txbd_status(bdp); @@ -3035,7 +3035,7 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb) u64 *ns = (u64 *) skb->data; memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(*ns); + shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns)); } if (priv->padding) -- cgit v1.2.3 From 5327ef9c95b09f536e886c61d1d768b3236748d5 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 24 Feb 2016 19:27:49 +0530 Subject: net: tulip: Use setup_timer() Convert a call to init_timer and accompanying intializations of the timer's data and function fields to a call to setup_timer. The Coccinelle semantic patch that fixes this problem is as follows: // @@ expression t,f,d; @@ -init_timer(&t); +setup_timer(&t,f,d); -t.data = d; -t.function = f; // Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/net/ethernet/dec/tulip/tulip_core.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index b553409e04ad..94d0eebef129 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -505,9 +505,7 @@ media_picked: tp->timer.expires = RUN_AT(next_tick); add_timer(&tp->timer); #ifdef CONFIG_TULIP_NAPI - init_timer(&tp->oom_timer); - tp->oom_timer.data = (unsigned long)dev; - tp->oom_timer.function = oom_timer; + setup_timer(&tp->oom_timer, oom_timer, (unsigned long)dev); #endif } @@ -782,9 +780,8 @@ static void tulip_down (struct net_device *dev) spin_unlock_irqrestore (&tp->lock, flags); - init_timer(&tp->timer); - tp->timer.data = (unsigned long)dev; - tp->timer.function = tulip_tbl[tp->chip_id].media_timer; + setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer, + (unsigned long)dev); dev->if_port = tp->saved_if_port; @@ -1475,9 +1472,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->csr0 = csr0; spin_lock_init(&tp->lock); spin_lock_init(&tp->mii_lock); - init_timer(&tp->timer); - tp->timer.data = (unsigned long)dev; - tp->timer.function = tulip_tbl[tp->chip_id].media_timer; + setup_timer(&tp->timer, tulip_tbl[tp->chip_id].media_timer, + (unsigned long)dev); INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); -- cgit v1.2.3 From de55558dc4e6562197bf0ea0fe249cbd7ccebae5 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 24 Feb 2016 19:28:01 +0530 Subject: forcedeth: Use setup_timer() Convert a call to init_timer and accompanying intializations of the timer's data and function fields to a call to setup_timer. 
The Coccinelle semantic patch that fixes this problem is as follows: // @@ expression t,f,d; @@ -init_timer(&t); +setup_timer(&t,f,d); -t.data = d; -t.function = f; // Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/net/ethernet/nvidia/forcedeth.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 75e88f4c1531..9b0d7f463ff3 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5629,12 +5629,8 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) u64_stats_init(&np->swstats_rx_syncp); u64_stats_init(&np->swstats_tx_syncp); - init_timer(&np->oom_kick); - np->oom_kick.data = (unsigned long) dev; - np->oom_kick.function = nv_do_rx_refill; /* timer handler */ - init_timer(&np->nic_poll); - np->nic_poll.data = (unsigned long) dev; - np->nic_poll.function = nv_do_nic_poll; /* timer handler */ + setup_timer(&np->oom_kick, nv_do_rx_refill, (unsigned long)dev); + setup_timer(&np->nic_poll, nv_do_nic_poll, (unsigned long)dev); init_timer_deferrable(&np->stats_poll); np->stats_poll.data = (unsigned long) dev; np->stats_poll.function = nv_do_stats_poll; /* timer handler */ -- cgit v1.2.3 From 5b6490def9168af6a1609df910a7800bcbc97977 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 24 Feb 2016 19:28:19 +0530 Subject: 3c59x: Use setup_timer() Convert a call to init_timer and accompanying intializations of the timer's data and function fields to a call to setup_timer. The Coccinelle semantic patch that fixes this problem is as follows: // @@ expression t,f,d; @@ -init_timer(&t); +setup_timer(&t,f,d); ... -t.data = d; -t.function = f; // Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/net/ethernet/3com/3c59x.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 79e1a0282163..c377607e6745 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1601,15 +1601,9 @@ vortex_up(struct net_device *dev) dev->name, media_tbl[dev->if_port].name); } - init_timer(&vp->timer); + setup_timer(&vp->timer, vortex_timer, (unsigned long)dev); vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); - vp->timer.data = (unsigned long)dev; - vp->timer.function = vortex_timer; /* timer handler */ - add_timer(&vp->timer); - - init_timer(&vp->rx_oom_timer); - vp->rx_oom_timer.data = (unsigned long)dev; - vp->rx_oom_timer.function = rx_oom_timer; + setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev); if (vortex_debug > 1) pr_debug("%s: Initial media type %s.\n", -- cgit v1.2.3 From 016c0bbae1d17d4509b74227dca618e01263e61a Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 24 Feb 2016 20:09:38 +0530 Subject: netxen: Use kobj_to_dev() Introduce the use of kobj_to_dev() helper function instead of open coding it with container_of() The Coccinelle semantic patch used to make this change is as follows: // @@ expression a; symbol kobj; @@ - container_of(a, struct device, kobj) + kobj_to_dev(a) // Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 6409a06bbdf6..fd362b6923f4 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -2891,7 +2891,7 @@ netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; @@ -2919,7 +2919,7 @@ netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u32 data; u64 qmdata; @@ -2960,7 +2960,7 @@ netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; @@ -2981,7 +2981,7 @@ static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); u64 data; int ret; @@ -3018,7 +3018,7 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t offset, size_t size) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct netxen_adapter *adapter = dev_get_drvdata(dev); struct net_device *netdev = adapter->netdev; struct netxen_dimm_cfg dimm; -- cgit v1.2.3 From 0e7441d73dafe5481f62029bad8ef046e22313b1 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Feb 2016 16:52:45 +0200 Subject: qede: Change pci DID for 10g device The device ID for the 10g module has changed. Populate the pci_ids table accordingly. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5f15e23a0f7d..76fa5d716c28 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -53,7 +53,7 @@ MODULE_PARM_DESC(debug, " Default debug msglevel"); static const struct qed_eth_ops *qed_ops; #define CHIP_NUM_57980S_40 0x1634 -#define CHIP_NUM_57980S_10 0x1635 +#define CHIP_NUM_57980S_10 0x1666 #define CHIP_NUM_57980S_MF 0x1636 #define CHIP_NUM_57980S_100 0x1644 #define CHIP_NUM_57980S_50 0x1654 -- cgit v1.2.3 From b1199b100e67f6d296e915b501f3c84d892a99c2 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Feb 2016 16:52:46 +0200 Subject: qede: Linearize SKBs when needed There's a corner-case in HW where an SKB queued for transmission that contains too many frags will cause FW to assert. 
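In simplified form the fix amounts to a check-and-linearize step at the top of the transmit path; the sketch below mirrors the hunks in the diff that follows (the LSO headlen handling is shown in full there):

	/* if the frag count exceeds what the FW can accept for this packet
	 * type, fall back to a single linear buffer before building BDs
	 */
	if (qede_pkt_req_lin(edev, skb, xmit_type)) {
		if (skb_linearize(skb)) {
			/* linearization needs memory; drop rather than assert the FW */
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
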
This patch solves this by linearizing the SKB if necessary. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 38 ++++++++++++++++++++++++---- 1 file changed, 33 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 76fa5d716c28..f07b9a906352 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -380,6 +380,28 @@ static int map_frag_to_bd(struct qede_dev *edev, return 0; } +/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */ +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) +static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb, + u8 xmit_type) +{ + int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; + + if (xmit_type & XMIT_LSO) { + int hlen; + + hlen = skb_transport_header(skb) + + tcp_hdrlen(skb) - skb->data; + + /* linear payload would require its own BD */ + if (skb_headlen(skb) > hlen) + allowed_frags--; + } + + return (skb_shinfo(skb)->nr_frags > allowed_frags); +} +#endif + /* Main transmit function */ static netdev_tx_t qede_start_xmit(struct sk_buff *skb, @@ -407,16 +429,22 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, txq = QEDE_TX_QUEUE(edev, txq_index); netdev_txq = netdev_get_tx_queue(ndev, txq_index); - /* Current code doesn't support SKB linearization, since the max number - * of skb frags can be passed in the FW HSI. - */ - BUILD_BUG_ON(MAX_SKB_FRAGS > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET); - WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); xmit_type = qede_xmit_type(edev, skb, &ipv6_ext); +#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) + if (qede_pkt_req_lin(edev, skb, xmit_type)) { + if (skb_linearize(skb)) { + DP_NOTICE(edev, + "SKB linearization failed - silently dropping this SKB\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + } +#endif + /* Fill the entry in the SW ring and the BDs in the FW ring */ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX; txq->sw_tx_ring[idx].skb = skb; -- cgit v1.2.3 From 8e025ae28ae866c6a01239c4df6cf465a7deb02f Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Feb 2016 16:52:47 +0200 Subject: qede: Don't report link change needlessly There are several corner cases where driver might get a 2nd notification about the same link change. Don't log any additional changes if the physical carrier is already reported as it should. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f07b9a906352..c0dd23614ed8 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2780,13 +2780,17 @@ static void qede_link_update(void *dev, struct qed_link_output *link) } if (link->link_up) { - DP_NOTICE(edev, "Link is up\n"); - netif_tx_start_all_queues(edev->ndev); - netif_carrier_on(edev->ndev); + if (!netif_carrier_ok(edev->ndev)) { + DP_NOTICE(edev, "Link is up\n"); + netif_tx_start_all_queues(edev->ndev); + netif_carrier_on(edev->ndev); + } } else { - DP_NOTICE(edev, "Link is down\n"); - netif_tx_disable(edev->ndev); - netif_carrier_off(edev->ndev); + if (netif_carrier_ok(edev->ndev)) { + DP_NOTICE(edev, "Link is down\n"); + netif_tx_disable(edev->ndev); + netif_carrier_off(edev->ndev); + } } } -- cgit v1.2.3 From d43d3f0f393b21ee14c0487d5757edae194c4848 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Feb 2016 16:52:48 +0200 Subject: qed: add MODULE_FIRMWARE() Module is using a binary firmware file and so should be marked as such. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 593f8871adb6..08cd92d66b6b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION); #define QED_FW_FILE_NAME \ "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" +MODULE_FIRMWARE(QED_FW_FILE_NAME); + static int __init qed_init(void) { pr_notice("qed_init called\n"); -- cgit v1.2.3 From 0dfaba6d0b4755fb379bd90dd5451e077617003f Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Feb 2016 16:52:49 +0200 Subject: qed: Prevent probe on previous error Don't allow driver to probe on an adapter at a failed state; Gracefully block the probe instead. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 08cd92d66b6b..e4e6ca5e8cee 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -99,12 +99,15 @@ static void qed_free_pci(struct qed_dev *cdev) pci_disable_device(pdev); } +#define PCI_REVISION_ID_ERROR_VAL 0xff + /* Performs PCI initializations as well as initializing PCI-related parameters * in the device structrue. Returns 0 in case of success. */ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) { + u8 rev_id; int rc; cdev->pdev = pdev; @@ -138,6 +141,14 @@ static int qed_init_pci(struct qed_dev *cdev, pci_save_state(pdev); } + pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); + if (rev_id == PCI_REVISION_ID_ERROR_VAL) { + DP_NOTICE(cdev, + "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", + rev_id); + rc = -ENODEV; + goto err2; + } if (!pci_is_pcie(pdev)) { DP_NOTICE(cdev, "The bus is not PCI Express\n"); rc = -EIO; -- cgit v1.2.3 From 5abd7e92f46527baf88eadfe6a36ee15f0d7e89f Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 24 Feb 2016 16:52:50 +0200 Subject: qed, qede: rebrand module description Drop the `QL4xxx 40G/100G' and use `FastLinQ 4xxxx' instead. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_main.c | 6 +++--- drivers/net/ethernet/qlogic/qede/qede_main.c | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index e4e6ca5e8cee..25d6e91335ea 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -29,10 +29,10 @@ #include "qed_mcp.h" #include "qed_hw.h" -static const char version[] = - "QLogic QL4xxx 40G/100G Ethernet Driver qed " DRV_MODULE_VERSION "\n"; +static char version[] = + "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; -MODULE_DESCRIPTION("QLogic 25G/40G/50G/100G Core Module"); +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index c0dd23614ed8..ddd9e4aaa500 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -39,10 +39,10 @@ #include "qede.h" -static const char version[] = "QLogic QL4xxx 40G/100G Ethernet Driver qede " - DRV_MODULE_VERSION "\n"; +static char version[] = + "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; -MODULE_DESCRIPTION("QLogic 40G/100G Ethernet Driver"); +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -- cgit v1.2.3 From 4f03980ca4d7f56222c8397d7b91f15efafa2918 Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:57:57 -0800 Subject: net: usnic: remove unused call to ethtool_ops::get_settings Signed-off-by: David Decotigny Signed-off-by: David S. Miller --- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 6cdb4d23f78f..ea003ec393d5 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -269,7 +269,6 @@ int usnic_ib_query_device(struct ib_device *ibdev, struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); union ib_gid gid; struct ethtool_drvinfo info; - struct ethtool_cmd cmd; int qp_per_vf; usnic_dbg("\n"); @@ -278,7 +277,6 @@ int usnic_ib_query_device(struct ib_device *ibdev, mutex_lock(&us_ibdev->usdev_lock); us_ibdev->netdev->ethtool_ops->get_drvinfo(us_ibdev->netdev, &info); - us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); usnic_mac_ip_to_gid(us_ibdev->ufdev->mac, us_ibdev->ufdev->inaddr, &gid.raw[0]); -- cgit v1.2.3 From 48133335d7e0a661f40a5899d20945dabb600f0a Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:57:58 -0800 Subject: net: usnic: use __ethtool_get_settings Signed-off-by: David Decotigny Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index ea003ec393d5..1cf19a31f57b 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -329,7 +329,7 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, usnic_dbg("\n"); mutex_lock(&us_ibdev->usdev_lock); - us_ibdev->netdev->ethtool_ops->get_settings(us_ibdev->netdev, &cmd); + __ethtool_get_settings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); props->lid = 0; -- cgit v1.2.3 From 96a0c39633cd3a50f1129c5f542e3a653b9fb8c9 Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:01 -0800 Subject: net: usnic: use __ethtool_get_ksettings Signed-off-by: David Decotigny Signed-off-by: David S. Miller --- drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 1cf19a31f57b..a5bfbba6bbac 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -324,12 +324,12 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props) { struct usnic_ib_dev *us_ibdev = to_usdev(ibdev); - struct ethtool_cmd cmd; + struct ethtool_link_ksettings cmd; usnic_dbg("\n"); mutex_lock(&us_ibdev->usdev_lock); - __ethtool_get_settings(us_ibdev->netdev, &cmd); + __ethtool_get_link_ksettings(us_ibdev->netdev, &cmd); memset(props, 0, sizeof(*props)); props->lid = 0; @@ -353,8 +353,8 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, props->pkey_tbl_len = 1; props->bad_pkey_cntr = 0; props->qkey_viol_cntr = 0; - eth_speed_to_ib_speed(cmd.speed, &props->active_speed, - &props->active_width); + eth_speed_to_ib_speed(cmd.base.speed, &props->active_speed, + &props->active_width); props->max_mtu = IB_MTU_4096; props->active_mtu = iboe_get_mtu(us_ibdev->ufdev->mtu); /* Userspace will adjust for hdrs */ -- cgit v1.2.3 From 9856909c2abb32a795d9782bff2ce987696ffb9b Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:02 -0800 Subject: net: bonding: use __ethtool_get_ksettings Signed-off-by: David Decotigny Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index a6527d5b3269..b6236ff3dbdd 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -376,22 +376,20 @@ down: static void bond_update_speed_duplex(struct slave *slave) { struct net_device *slave_dev = slave->dev; - struct ethtool_cmd ecmd; - u32 slave_speed; + struct ethtool_link_ksettings ecmd; int res; slave->speed = SPEED_UNKNOWN; slave->duplex = DUPLEX_UNKNOWN; - res = __ethtool_get_settings(slave_dev, &ecmd); + res = __ethtool_get_link_ksettings(slave_dev, &ecmd); if (res < 0) return; - slave_speed = ethtool_cmd_speed(&ecmd); - if (slave_speed == 0 || slave_speed == ((__u32) -1)) + if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1)) return; - switch (ecmd.duplex) { + switch (ecmd.base.duplex) { case DUPLEX_FULL: case DUPLEX_HALF: break; @@ -399,8 +397,8 @@ static void bond_update_speed_duplex(struct slave *slave) return; } - slave->speed = slave_speed; - slave->duplex = ecmd.duplex; + slave->speed = ecmd.base.speed; + slave->duplex = ecmd.base.duplex; return; } -- cgit v1.2.3 From 314d10d73b2580c0a036536f6444d704d96d6082 Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:03 -0800 Subject: net: ipvlan: use __ethtool_get_ksettings Signed-off-by: David Decotigny Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index a7ca1c519a0d..5802b9025765 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -346,12 +346,12 @@ static const struct header_ops ipvlan_header_ops = { .cache_update = eth_header_cache_update, }; -static int ipvlan_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { const struct ipvl_dev *ipvlan = netdev_priv(dev); - return __ethtool_get_settings(ipvlan->phy_dev, cmd); + return __ethtool_get_link_ksettings(ipvlan->phy_dev, cmd); } static void ipvlan_ethtool_get_drvinfo(struct net_device *dev, @@ -377,7 +377,7 @@ static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value) static const struct ethtool_ops ipvlan_ethtool_ops = { .get_link = ethtool_op_get_link, - .get_settings = ipvlan_ethtool_get_settings, + .get_link_ksettings = ipvlan_ethtool_get_link_ksettings, .get_drvinfo = ipvlan_ethtool_get_drvinfo, .get_msglevel = ipvlan_ethtool_get_msglevel, .set_msglevel = ipvlan_ethtool_set_msglevel, -- cgit v1.2.3 From 85f9581975dc0315b1b47c5323a8ee497ebd6a8f Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:04 -0800 Subject: net: macvlan: use __ethtool_get_ksettings Signed-off-by: David Decotigny Signed-off-by: David S. 
Miller --- drivers/net/macvlan.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 426a2cc27ac8..6e953e3a460a 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -940,12 +940,12 @@ static void macvlan_ethtool_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->version, "0.1", sizeof(drvinfo->version)); } -static int macvlan_ethtool_get_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int macvlan_ethtool_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *cmd) { const struct macvlan_dev *vlan = netdev_priv(dev); - return __ethtool_get_settings(vlan->lowerdev, cmd); + return __ethtool_get_link_ksettings(vlan->lowerdev, cmd); } static netdev_features_t macvlan_fix_features(struct net_device *dev, @@ -1020,7 +1020,7 @@ static int macvlan_dev_get_iflink(const struct net_device *dev) static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, - .get_settings = macvlan_ethtool_get_settings, + .get_link_ksettings = macvlan_ethtool_get_link_ksettings, .get_drvinfo = macvlan_ethtool_get_drvinfo, }; -- cgit v1.2.3 From 0ab6b544c185de913fa662c7565d78bdeccb54c4 Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:05 -0800 Subject: net: team: use __ethtool_get_ksettings Signed-off-by: David Decotigny Signed-off-by: David S. Miller --- drivers/net/team/team.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 00558e139584..2769835f48ca 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2813,12 +2813,12 @@ static void __team_port_change_send(struct team_port *port, bool linkup) port->state.linkup = linkup; team_refresh_port_linkup(port); if (linkup) { - struct ethtool_cmd ecmd; + struct ethtool_link_ksettings ecmd; - err = __ethtool_get_settings(port->dev, &ecmd); + err = __ethtool_get_link_ksettings(port->dev, &ecmd); if (!err) { - port->state.speed = ethtool_cmd_speed(&ecmd); - port->state.duplex = ecmd.duplex; + port->state.speed = ecmd.base.speed; + port->state.duplex = ecmd.base.duplex; goto send_event; } } -- cgit v1.2.3 From 008eb736932dcec25e99232dd280ed87e59635c8 Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:06 -0800 Subject: net: fcoe: use __ethtool_get_ksettings Signed-off-by: David Decotigny Signed-off-by: David S. 
Miller --- drivers/scsi/fcoe/fcoe_transport.c | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index d7597c08fa11..641c60e8fda3 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -93,36 +93,40 @@ static struct notifier_block libfcoe_notifier = { int fcoe_link_speed_update(struct fc_lport *lport) { struct net_device *netdev = fcoe_get_netdev(lport); - struct ethtool_cmd ecmd; + struct ethtool_link_ksettings ecmd; - if (!__ethtool_get_settings(netdev, &ecmd)) { + if (!__ethtool_get_link_ksettings(netdev, &ecmd)) { lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT | FC_PORTSPEED_20GBIT | FC_PORTSPEED_40GBIT); - if (ecmd.supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full | - SUPPORTED_1000baseKX_Full)) + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseKX_Full)) lport->link_supported_speeds |= FC_PORTSPEED_1GBIT; - if (ecmd.supported & (SUPPORTED_10000baseT_Full | - SUPPORTED_10000baseKX4_Full | - SUPPORTED_10000baseKR_Full | - SUPPORTED_10000baseR_FEC)) + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_10000baseT_Full | + SUPPORTED_10000baseKX4_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_10000baseR_FEC)) lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; - if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full | - SUPPORTED_20000baseKR2_Full)) + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_20000baseMLD2_Full | + SUPPORTED_20000baseKR2_Full)) lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; - if (ecmd.supported & (SUPPORTED_40000baseKR4_Full | - SUPPORTED_40000baseCR4_Full | - SUPPORTED_40000baseSR4_Full | - SUPPORTED_40000baseLR4_Full)) + if (ecmd.link_modes.supported[0] & ( + SUPPORTED_40000baseKR4_Full | + SUPPORTED_40000baseCR4_Full | + SUPPORTED_40000baseSR4_Full | + SUPPORTED_40000baseLR4_Full)) lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; - switch (ethtool_cmd_speed(&ecmd)) { + switch (ecmd.base.speed) { case SPEED_1000: lport->link_speed = FC_PORTSPEED_1GBIT; break; -- cgit v1.2.3 From 3d8f7cc78d0eb07641fdcfb3961e8794778a6678 Mon Sep 17 00:00:00 2001 From: David Decotigny Date: Wed, 24 Feb 2016 10:58:12 -0800 Subject: net: mlx4: use new ETHTOOL_G/SSETTINGS API Signed-off-by: David Decotigny Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 357 +++++++++++++----------- drivers/net/ethernet/mellanox/mlx4/en_main.c | 1 + drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 + 3 files changed, 189 insertions(+), 170 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index dd84cabb2a51..f69584a9b47f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -501,34 +501,30 @@ static u32 mlx4_en_autoneg_get(struct net_device *dev) return autoneg; } -static u32 ptys_get_supported_port(struct mlx4_ptys_reg *ptys_reg) +static void ptys2ethtool_update_supported_port(unsigned long *mask, + struct mlx4_ptys_reg *ptys_reg) { u32 eth_proto = be32_to_cpu(ptys_reg->eth_proto_cap); if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_T) | MLX4_PROT_MASK(MLX4_1000BASE_T) | MLX4_PROT_MASK(MLX4_100BASE_TX))) { - return SUPPORTED_TP; - } - - if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) + __set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); + } else if (eth_proto & (MLX4_PROT_MASK(MLX4_10GBASE_CR) | MLX4_PROT_MASK(MLX4_10GBASE_SR) | MLX4_PROT_MASK(MLX4_56GBASE_SR4) | MLX4_PROT_MASK(MLX4_40GBASE_CR4) | MLX4_PROT_MASK(MLX4_40GBASE_SR4) | MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII))) { - return SUPPORTED_FIBRE; - } - - if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) + __set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask); + } else if (eth_proto & (MLX4_PROT_MASK(MLX4_56GBASE_KR4) | MLX4_PROT_MASK(MLX4_40GBASE_KR4) | MLX4_PROT_MASK(MLX4_20GBASE_KR2) | MLX4_PROT_MASK(MLX4_10GBASE_KR) | MLX4_PROT_MASK(MLX4_10GBASE_KX4) | MLX4_PROT_MASK(MLX4_1000BASE_KX))) { - return SUPPORTED_Backplane; + __set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mask); } - return 0; } static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) @@ -574,122 +570,111 @@ static u32 ptys_get_active_port(struct mlx4_ptys_reg *ptys_reg) enum ethtool_report { SUPPORTED = 0, ADVERTISED = 1, - SPEED = 2 }; +struct ptys2ethtool_config { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised); + u32 speed; +}; + +static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg, + enum ethtool_report report) +{ + switch (report) { + case SUPPORTED: + return cfg->supported; + case ADVERTISED: + return cfg->advertised; + } + return NULL; +} + +#define MLX4_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) 
\ + ({ \ + struct ptys2ethtool_config *cfg; \ + const unsigned int modes[] = { __VA_ARGS__ }; \ + unsigned int i; \ + cfg = &ptys2ethtool_map[reg_]; \ + cfg->speed = speed_; \ + bitmap_zero(cfg->supported, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + bitmap_zero(cfg->advertised, \ + __ETHTOOL_LINK_MODE_MASK_NBITS); \ + for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \ + __set_bit(modes[i], cfg->supported); \ + __set_bit(modes[i], cfg->advertised); \ + } \ + }) + /* Translates mlx4 link mode to equivalent ethtool Link modes/speed */ -static u32 ptys2ethtool_map[MLX4_LINK_MODES_SZ][3] = { - [MLX4_100BASE_TX] = { - SUPPORTED_100baseT_Full, - ADVERTISED_100baseT_Full, - SPEED_100 - }, - - [MLX4_1000BASE_T] = { - SUPPORTED_1000baseT_Full, - ADVERTISED_1000baseT_Full, - SPEED_1000 - }, - [MLX4_1000BASE_CX_SGMII] = { - SUPPORTED_1000baseKX_Full, - ADVERTISED_1000baseKX_Full, - SPEED_1000 - }, - [MLX4_1000BASE_KX] = { - SUPPORTED_1000baseKX_Full, - ADVERTISED_1000baseKX_Full, - SPEED_1000 - }, - - [MLX4_10GBASE_T] = { - SUPPORTED_10000baseT_Full, - ADVERTISED_10000baseT_Full, - SPEED_10000 - }, - [MLX4_10GBASE_CX4] = { - SUPPORTED_10000baseKX4_Full, - ADVERTISED_10000baseKX4_Full, - SPEED_10000 - }, - [MLX4_10GBASE_KX4] = { - SUPPORTED_10000baseKX4_Full, - ADVERTISED_10000baseKX4_Full, - SPEED_10000 - }, - [MLX4_10GBASE_KR] = { - SUPPORTED_10000baseKR_Full, - ADVERTISED_10000baseKR_Full, - SPEED_10000 - }, - [MLX4_10GBASE_CR] = { - SUPPORTED_10000baseKR_Full, - ADVERTISED_10000baseKR_Full, - SPEED_10000 - }, - [MLX4_10GBASE_SR] = { - SUPPORTED_10000baseKR_Full, - ADVERTISED_10000baseKR_Full, - SPEED_10000 - }, - - [MLX4_20GBASE_KR2] = { - SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full, - ADVERTISED_20000baseMLD2_Full | ADVERTISED_20000baseKR2_Full, - SPEED_20000 - }, - - [MLX4_40GBASE_CR4] = { - SUPPORTED_40000baseCR4_Full, - ADVERTISED_40000baseCR4_Full, - SPEED_40000 - }, - [MLX4_40GBASE_KR4] = { - SUPPORTED_40000baseKR4_Full, - ADVERTISED_40000baseKR4_Full, - SPEED_40000 - }, - [MLX4_40GBASE_SR4] = { - SUPPORTED_40000baseSR4_Full, - ADVERTISED_40000baseSR4_Full, - SPEED_40000 - }, - - [MLX4_56GBASE_KR4] = { - SUPPORTED_56000baseKR4_Full, - ADVERTISED_56000baseKR4_Full, - SPEED_56000 - }, - [MLX4_56GBASE_CR4] = { - SUPPORTED_56000baseCR4_Full, - ADVERTISED_56000baseCR4_Full, - SPEED_56000 - }, - [MLX4_56GBASE_SR4] = { - SUPPORTED_56000baseSR4_Full, - ADVERTISED_56000baseSR4_Full, - SPEED_56000 - }, +static struct ptys2ethtool_config ptys2ethtool_map[MLX4_LINK_MODES_SZ]; + +void __init mlx4_en_init_ptys2ethtool_map(void) +{ + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_100BASE_TX, SPEED_100, + ETHTOOL_LINK_MODE_100baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000, + ETHTOOL_LINK_MODE_1000baseKX_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseT_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KX4, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + 
MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000, + ETHTOOL_LINK_MODE_10000baseKR_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000, + ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, + ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_CR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_KR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_40GBASE_SR4, SPEED_40000, + ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_KR4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_CR4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT); + MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_56GBASE_SR4, SPEED_56000, + ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT); }; -static u32 ptys2ethtool_link_modes(u32 eth_proto, enum ethtool_report report) +static void ptys2ethtool_update_link_modes(unsigned long *link_modes, + u32 eth_proto, + enum ethtool_report report) { int i; - u32 link_modes = 0; - for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { if (eth_proto & MLX4_PROT_MASK(i)) - link_modes |= ptys2ethtool_map[i][report]; + bitmap_or(link_modes, link_modes, + ptys2ethtool_link_mode(&ptys2ethtool_map[i], + report), + __ETHTOOL_LINK_MODE_MASK_NBITS); } - return link_modes; } -static u32 ethtool2ptys_link_modes(u32 link_modes, enum ethtool_report report) +static u32 ethtool2ptys_link_modes(const unsigned long *link_modes, + enum ethtool_report report) { int i; u32 ptys_modes = 0; for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { - if (ptys2ethtool_map[i][report] & link_modes) + if (bitmap_intersects( + ptys2ethtool_link_mode(&ptys2ethtool_map[i], + report), + link_modes, + __ETHTOOL_LINK_MODE_MASK_NBITS)) ptys_modes |= 1 << i; } return ptys_modes; @@ -702,14 +687,15 @@ static u32 speed2ptys_link_modes(u32 speed) u32 ptys_modes = 0; for (i = 0; i < MLX4_LINK_MODES_SZ; i++) { - if (ptys2ethtool_map[i][SPEED] == speed) + if (ptys2ethtool_map[i].speed == speed) ptys_modes |= 1 << i; } return ptys_modes; } -static int ethtool_get_ptys_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static int +ethtool_get_ptys_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; @@ -737,79 +723,102 @@ static int ethtool_get_ptys_settings(struct net_device *dev, en_dbg(DRV, priv, "ptys_reg.eth_proto_lp_adv %x\n", be32_to_cpu(ptys_reg.eth_proto_lp_adv)); - cmd->supported = 0; - cmd->advertising = 0; + /* reset supported/advertising masks */ + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); - cmd->supported |= ptys_get_supported_port(&ptys_reg); + ptys2ethtool_update_supported_port(link_ksettings->link_modes.supported, + &ptys_reg); eth_proto = be32_to_cpu(ptys_reg.eth_proto_cap); - cmd->supported |= ptys2ethtool_link_modes(eth_proto, SUPPORTED); + ptys2ethtool_update_link_modes(link_ksettings->link_modes.supported, + eth_proto, SUPPORTED); eth_proto = be32_to_cpu(ptys_reg.eth_proto_admin); - cmd->advertising |= ptys2ethtool_link_modes(eth_proto, ADVERTISED); + ptys2ethtool_update_link_modes(link_ksettings->link_modes.advertising, + eth_proto, ADVERTISED); - cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - cmd->advertising |= (priv->prof->tx_pause) ? 
ADVERTISED_Pause : 0; + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Pause); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + Asym_Pause); - cmd->advertising |= (priv->prof->tx_pause ^ priv->prof->rx_pause) ? - ADVERTISED_Asym_Pause : 0; + if (priv->prof->tx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Pause); + if (priv->prof->tx_pause ^ priv->prof->rx_pause) + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Asym_Pause); - cmd->port = ptys_get_active_port(&ptys_reg); - cmd->transceiver = (SUPPORTED_TP & cmd->supported) ? - XCVR_EXTERNAL : XCVR_INTERNAL; + link_ksettings->base.port = ptys_get_active_port(&ptys_reg); if (mlx4_en_autoneg_get(dev)) { - cmd->supported |= SUPPORTED_Autoneg; - cmd->advertising |= ADVERTISED_Autoneg; + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, Autoneg); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, Autoneg); } - cmd->autoneg = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? + link_ksettings->base.autoneg + = (priv->port_state.flags & MLX4_EN_PORT_ANC) ? AUTONEG_ENABLE : AUTONEG_DISABLE; eth_proto = be32_to_cpu(ptys_reg.eth_proto_lp_adv); - cmd->lp_advertising = ptys2ethtool_link_modes(eth_proto, ADVERTISED); - cmd->lp_advertising |= (priv->port_state.flags & MLX4_EN_PORT_ANC) ? - ADVERTISED_Autoneg : 0; + ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising); + ptys2ethtool_update_link_modes( + link_ksettings->link_modes.lp_advertising, + eth_proto, ADVERTISED); + if (priv->port_state.flags & MLX4_EN_PORT_ANC) + ethtool_link_ksettings_add_link_mode(link_ksettings, + lp_advertising, Autoneg); - cmd->phy_address = 0; - cmd->mdio_support = 0; - cmd->maxtxpkt = 0; - cmd->maxrxpkt = 0; - cmd->eth_tp_mdix = ETH_TP_MDI_INVALID; - cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + link_ksettings->base.phy_address = 0; + link_ksettings->base.mdio_support = 0; + link_ksettings->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + link_ksettings->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; return ret; } -static void ethtool_get_default_settings(struct net_device *dev, - struct ethtool_cmd *cmd) +static void +ethtool_get_default_link_ksettings( + struct net_device *dev, struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); int trans_type; - cmd->autoneg = AUTONEG_DISABLE; - cmd->supported = SUPPORTED_10000baseT_Full; - cmd->advertising = ADVERTISED_10000baseT_Full; - trans_type = priv->port_state.transceiver; + link_ksettings->base.autoneg = AUTONEG_DISABLE; + + ethtool_link_ksettings_zero_link_mode(link_ksettings, supported); + ethtool_link_ksettings_add_link_mode(link_ksettings, supported, + 10000baseT_Full); + ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); + ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, + 10000baseT_Full); + + trans_type = priv->port_state.transceiver; if (trans_type > 0 && trans_type <= 0xC) { - cmd->port = PORT_FIBRE; - cmd->transceiver = XCVR_EXTERNAL; - cmd->supported |= SUPPORTED_FIBRE; - cmd->advertising |= ADVERTISED_FIBRE; + link_ksettings->base.port = PORT_FIBRE; + ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, FIBRE); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, FIBRE); } else if (trans_type == 0x80 || trans_type == 0) { - cmd->port = PORT_TP; - cmd->transceiver = XCVR_INTERNAL; - cmd->supported |= SUPPORTED_TP; - cmd->advertising |= ADVERTISED_TP; + link_ksettings->base.port = PORT_TP; + 
ethtool_link_ksettings_add_link_mode(link_ksettings, + supported, TP); + ethtool_link_ksettings_add_link_mode(link_ksettings, + advertising, TP); } else { - cmd->port = -1; - cmd->transceiver = -1; + link_ksettings->base.port = -1; } } -static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int +mlx4_en_get_link_ksettings(struct net_device *dev, + struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); int ret = -EINVAL; @@ -822,16 +831,16 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) priv->port_state.flags & MLX4_EN_PORT_ANE); if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) - ret = ethtool_get_ptys_settings(dev, cmd); + ret = ethtool_get_ptys_link_ksettings(dev, link_ksettings); if (ret) /* ETH PROT CRTL is not supported or PTYS CMD failed */ - ethtool_get_default_settings(dev, cmd); + ethtool_get_default_link_ksettings(dev, link_ksettings); if (netif_carrier_ok(dev)) { - ethtool_cmd_speed_set(cmd, priv->port_state.link_speed); - cmd->duplex = DUPLEX_FULL; + link_ksettings->base.speed = priv->port_state.link_speed; + link_ksettings->base.duplex = DUPLEX_FULL; } else { - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN); - cmd->duplex = DUPLEX_UNKNOWN; + link_ksettings->base.speed = SPEED_UNKNOWN; + link_ksettings->base.duplex = DUPLEX_UNKNOWN; } return 0; } @@ -855,21 +864,29 @@ static __be32 speed_set_ptys_admin(struct mlx4_en_priv *priv, u32 speed, return proto_admin; } -static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +static int +mlx4_en_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *link_ksettings) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_ptys_reg ptys_reg; __be32 proto_admin; int ret; - u32 ptys_adv = ethtool2ptys_link_modes(cmd->advertising, ADVERTISED); - int speed = ethtool_cmd_speed(cmd); + u32 ptys_adv = ethtool2ptys_link_modes( + link_ksettings->link_modes.advertising, ADVERTISED); + const int speed = link_ksettings->base.speed; - en_dbg(DRV, priv, "Set Speed=%d adv=0x%x autoneg=%d duplex=%d\n", - speed, cmd->advertising, cmd->autoneg, cmd->duplex); + en_dbg(DRV, priv, + "Set Speed=%d adv={%*pbl} autoneg=%d duplex=%d\n", + speed, __ETHTOOL_LINK_MODE_MASK_NBITS, + link_ksettings->link_modes.advertising, + link_ksettings->base.autoneg, + link_ksettings->base.duplex); - if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || - (cmd->duplex == DUPLEX_HALF)) + if (!(priv->mdev->dev->caps.flags2 & + MLX4_DEV_CAP_FLAG2_ETH_PROT_CTRL) || + (link_ksettings->base.duplex == DUPLEX_HALF)) return -EINVAL; memset(&ptys_reg, 0, sizeof(ptys_reg)); @@ -883,7 +900,7 @@ static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return 0; } - proto_admin = cmd->autoneg == AUTONEG_ENABLE ? + proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ? 
cpu_to_be32(ptys_adv) : speed_set_ptys_admin(priv, speed, ptys_reg.eth_proto_cap); @@ -1982,8 +1999,8 @@ static int mlx4_en_set_phys_id(struct net_device *dev, const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, - .get_settings = mlx4_en_get_settings, - .set_settings = mlx4_en_set_settings, + .get_link_ksettings = mlx4_en_get_link_ksettings, + .set_link_ksettings = mlx4_en_set_link_ksettings, .get_link = ethtool_op_get_link, .get_strings = mlx4_en_get_strings, .get_sset_count = mlx4_en_get_sset_count, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index e0ec280a7fa1..bf7628db098a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -382,6 +382,7 @@ static void mlx4_en_verify_params(void) static int __init mlx4_en_init(void) { mlx4_en_verify_params(); + mlx4_en_init_ptys2ethtool_map(); return mlx4_register_interface(&mlx4_en_interface); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 35de7d2e6b34..d12ab6a73344 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -607,6 +607,7 @@ static inline struct mlx4_cqe *mlx4_en_get_cqe(void *buf, int idx, int cqe_sz) #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) +void mlx4_en_init_ptys2ethtool_map(void); void mlx4_en_update_loopback_state(struct net_device *dev, netdev_features_t features); -- cgit v1.2.3 From 59097ac9b20a0318b2b99b3338c0bb066309583e Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Wed, 24 Feb 2016 09:15:43 +0100 Subject: can: ems_usb: fix coding style This patch fixes the coding style issues introduced in commit: 90cfde46586d can: ems_usb: Fix possible tx overflow Reported-by: Sergei Shtylyov Signed-off-by: Marc Kleine-Budde --- drivers/net/can/usb/ems_usb.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index eb7192fab593..3400fd1cada7 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -281,11 +281,9 @@ static void ems_usb_read_interrupt_callback(struct urb *urb) switch (urb->status) { case 0: dev->free_slots = dev->intr_in_buffer[1]; - if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){ - if (netif_queue_stopped(netdev)){ - netif_wake_queue(netdev); - } - } + if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH && + netif_queue_stopped(netdev)) + netif_wake_queue(netdev); break; case -ECONNRESET: /* unlink */ -- cgit v1.2.3 From 0dfa61bba38513957240660a9cad82dd408789ca Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 24 Feb 2016 10:56:32 +0900 Subject: can: rcar: add gen[12] fallback compatibility strings Add fallback compatibility string for R-Car Gen 1 and Gen2. In the case of Renesas R-Car hardware we know that there are generations of SoCs, e.g. Gen 1 and Gen 2. But beyond that its not clear what the relationship between IP blocks might be. For example, I believe that r8a7779 is older than r8a7778 but that doesn't imply that the latter is a descendant of the former or vice versa. We can, however, by examining the documentation and behaviour of the hardware at run-time observe that the current driver implementation appears to be compatible with the IP blocks on SoCs within a given generation. 
For the above reasons and convenience when enabling new SoCs a per-generation fallback compatibility string scheme being adopted for drivers for Renesas SoCs. Signed-off-by: Simon Horman Acked-by: Rob Herring Acked-by: Geert Uytterhoeven Signed-off-by: Marc Kleine-Budde --- Documentation/devicetree/bindings/net/can/rcar_can.txt | 8 +++++++- drivers/net/can/rcar_can.c | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt index 002d8440bf66..f2172fb42cd8 100644 --- a/Documentation/devicetree/bindings/net/can/rcar_can.txt +++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt @@ -6,6 +6,12 @@ Required properties: "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC. "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC. "renesas,can-r8a7791" if CAN controller is a part of R8A7791 SoC. + "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device. + "renesas,rcar-gen2-can" for a generic R-Car Gen2 compatible device. + When compatible with the generic version, nodes must list the + SoC-specific version corresponding to the platform first + followed by the generic version. + - reg: physical base address and size of the R-Car CAN register map. - interrupts: interrupt specifier for the sole interrupt. - clocks: phandles and clock specifiers for 3 CAN clock inputs. @@ -25,7 +31,7 @@ Example SoC common .dtsi file: can0: can@e6e80000 { - compatible = "renesas,can-r8a7791"; + compatible = "renesas,can-r8a7791", "renesas,rcar-gen2-can"; reg = <0 0xe6e80000 0 0x1000>; interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>; clocks = <&mstp9_clks R8A7791_CLK_RCAN0>, diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index bc46be39549d..ad3d2e0cb191 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -904,6 +904,8 @@ static const struct of_device_id rcar_can_of_table[] __maybe_unused = { { .compatible = "renesas,can-r8a7779" }, { .compatible = "renesas,can-r8a7790" }, { .compatible = "renesas,can-r8a7791" }, + { .compatible = "renesas,rcar-gen1-can" }, + { .compatible = "renesas,rcar-gen2-can" }, { } }; MODULE_DEVICE_TABLE(of, rcar_can_of_table); -- cgit v1.2.3 From f73e0c24bf075cf934e73133f5e89052afac1d32 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 27 Feb 2016 14:31:43 +0300 Subject: rocker: fix an error code We intended to return PTR_ERR() here instead of 1. Fixes: 1f9993f6825f ('rocker: fix a neigh entry leak issue') Signed-off-by: Dan Carpenter Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/rocker/rocker_ofdpa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 099008a53b03..07218c360d86 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -1449,7 +1449,7 @@ static int ofdpa_port_ipv4_resolve(struct ofdpa_port *ofdpa_port, if (!n) { n = neigh_create(&arp_tbl, &ip_addr, dev); if (IS_ERR(n)) - return IS_ERR(n); + return PTR_ERR(n); } /* If the neigh is already resolved, then go ahead and -- cgit v1.2.3 From 0a3b7119000d706dfbc7e0c5b66e192a646d365f Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Wed, 16 Dec 2015 16:34:55 +0200 Subject: iwlwifi: mvm: add CT-KILL notification Up to today the driver was notified of the temperature from the FW and decided whether to enter CT-kill or not. From now on, the FW will decide when to enter CT-kill and will notify the driver. Add support for this notification. Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 5 +++++ drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 12 +++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 15 +++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 +++ drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 27 +++++++++++++++++++++++- 5 files changed, 61 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index e2dbc67a367b..724a3eef78b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -318,6 +318,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon * from AP and will send it upon d0i3 exit. 
* @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2 + * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill + * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature + * thresholds reporting * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -351,6 +354,8 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = (__force iwl_ucode_tlv_capa_t)71, IWL_UCODE_TLV_CAPA_BEACON_STORING = (__force iwl_ucode_tlv_capa_t)72, IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, + IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, + IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index f332497e29d1..ecbf7cb600ce 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -279,6 +279,7 @@ enum { */ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + CT_KILL_NOTIFICATION = 0xFE, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; @@ -1685,6 +1686,17 @@ struct iwl_dts_measurement_notif { __le32 voltage; } __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ +/** + * struct ct_kill_notif - CT-kill entry notification + * + * @temperature: the current temperature in celsius + * @reserved: reserved + */ +struct ct_kill_notif { + __le16 temperature; + __le16 reserved; +} __packed; /* GRP_PHY_CT_KILL_NTF */ + /*********************************** * TDLS API ***********************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ebe37bb0ce4c..200bbb76ff0a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -1028,6 +1030,18 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT); } +static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) +{ + /* these two TLV are redundant since the responsibility to CT-kill by + * FW happens only after we send at least one command of + * temperature THs report. 
+ */ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && + fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -1502,6 +1516,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); void iwl_mvm_tt_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); +void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 09a94a5efb61..ecc371e1f3f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -263,6 +263,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, iwl_mvm_temp_notif, true), + RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, + iwl_mvm_ct_kill_notif, false), RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, true), @@ -387,6 +389,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), + HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 758d05a8c6aa..6ba391099d7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -7,6 +7,7 @@ * * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,7 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -171,6 +172,24 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) iwl_mvm_tt_temp_changed(mvm, temp); } +void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct ct_kill_notif *notif; + int len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ON_ONCE(len != sizeof(*notif))) { + IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n"); + return; + } + + notif = (struct ct_kill_notif *)pkt->data; + IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n", + notif->temperature); + + iwl_mvm_enter_ctkill(mvm); +} + static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm) { struct iwl_dts_measurement_cmd cmd = { @@ -236,6 +255,12 @@ static void check_exit_ctkill(struct work_struct *work) tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work); mvm = container_of(tt, struct iwl_mvm, thermal_throttle); + if (iwl_mvm_is_tt_in_fw(mvm)) { + iwl_mvm_exit_ctkill(mvm); + + return; + } + duration = tt->params.ct_kill_duration; mutex_lock(&mvm->mutex); -- cgit v1.2.3 From c221daf219b1cf38e7c4307f0f420ea826678af5 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Tue, 29 Dec 2015 09:54:49 +0200 Subject: iwlwifi: mvm: add registration to thermal zone Register to thermal_zone interface and implement the thermal ops. The thermal handles the device throttling, and sets the the temperature thresholds the Thermal Manager would be notified of crossing. The thermal interface adds a new thermal zone device sensor under /sys/class/thermal/ folder. Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 33 ++- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 13 ++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 31 ++- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 6 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 287 +++++++++++++++++++++++- 5 files changed, 353 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index ecbf7cb600ce..e692098a9f1e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -279,6 +279,7 @@ enum { */ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + TEMP_REPORTING_THRESHOLDS_CMD = 0x04, CT_KILL_NOTIFICATION = 0xFE, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; @@ -1676,15 +1677,28 @@ struct iwl_ext_dts_measurement_cmd { } __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */ /** - * iwl_dts_measurement_notif - notification received with the measurements + * struct iwl_dts_measurement_notif_v1 - measurements notification * * @temp: the measured temperature * @voltage: the measured voltage */ -struct iwl_dts_measurement_notif { +struct iwl_dts_measurement_notif_v1 { __le32 temp; __le32 voltage; -} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S */ +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/ + +/** + * struct iwl_dts_measurement_notif_v2 - measurements notification + * + * @temp: the measured temperature + * @voltage: the measured voltage + * @threshold_idx: the trip index that was crossed + */ +struct iwl_dts_measurement_notif_v2 { + __le32 temp; + __le32 voltage; + __le32 threshold_idx; +} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */ /** * struct ct_kill_notif - CT-kill entry notification @@ 
-1697,6 +1711,19 @@ struct ct_kill_notif { __le16 reserved; } __packed; /* GRP_PHY_CT_KILL_NTF */ +#define IWL_MAX_DTS_TRIPS 8 + +/** + * struct iwl_temp_report_ths_cmd - set temperature thresholds + * + * @num_temps: number of temperature thresholds passed + * @thresholds: array with the thresholds to be configured + */ +struct temp_report_ths_cmd { + __le32 num_temps; + __le16 thresholds[IWL_MAX_DTS_TRIPS]; +} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */ + /*********************************** * TDLS API ***********************************/ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 070e2af05ca2..07f2cbd9c8e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -952,8 +952,21 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; } +#ifdef CONFIG_THERMAL + if (iwl_mvm_is_tt_in_fw(mvm)) { + /* in order to give the responsibility of ct-kill and + * TX backoff to FW we need to send empty temperature reporting + * cmd during init time + */ + iwl_mvm_send_temp_report_ths_cmd(mvm); + } else { + /* Initialize tx backoffs to the minimal possible */ + iwl_mvm_tt_tx_backoff(mvm, 0); + } +#else /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); +#endif WARN_ON(iwl_mvm_config_ltr(mvm)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 200bbb76ff0a..87d3e2884886 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -73,6 +73,10 @@ #include #include +#ifdef CONFIG_THERMAL +#include +#endif + #include "iwl-op-mode.h" #include "iwl-trans.h" #include "iwl-notif-wait.h" @@ -519,6 +523,20 @@ struct iwl_mvm_tt_mgmt { bool throttle; }; +#ifdef CONFIG_THERMAL +/** + *struct iwl_mvm_thermal_device - thermal zone related data + * @temp_trips: temperature thresholds for report + * @fw_trips_index: keep indexes to original array - temp_trips + * @tzone: thermal zone device data +*/ +struct iwl_mvm_thermal_device { + s16 temp_trips[IWL_MAX_DTS_TRIPS]; + u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; + struct thermal_zone_device *tzone; +}; +#endif + #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 struct iwl_mvm_frame_stats { @@ -799,6 +817,10 @@ struct iwl_mvm { /* Thermal Throttling and CTkill */ struct iwl_mvm_tt_mgmt thermal_throttle; +#ifdef CONFIG_THERMAL + struct iwl_mvm_thermal_device tz_device; +#endif + s32 temperature; /* Celsius */ /* * Debug option to set the NIC temperature. This option makes the @@ -1032,6 +1054,7 @@ static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) { +#ifdef CONFIG_THERMAL /* these two TLV are redundant since the responsibility to CT-kill by * FW happens only after we send at least one command of * temperature THs report. 
@@ -1040,6 +1063,9 @@ static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) && fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT); +#else /* CONFIG_THERMAL */ + return false; +#endif /* CONFIG_THERMAL */ } extern const u8 iwl_mvm_ac_to_tx_fifo[]; @@ -1512,11 +1538,12 @@ void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_tt_handler(struct iwl_mvm *mvm); -void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff); -void iwl_mvm_tt_exit(struct iwl_mvm *mvm); +void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff); +void iwl_mvm_thermal_exit(struct iwl_mvm *mvm); void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ecc371e1f3f0..a7acadd446c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -389,6 +389,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), + HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; @@ -591,7 +592,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, mvm->cfg->name, mvm->trans->hw_rev); min_backoff = calc_min_backoff(trans, cfg); - iwl_mvm_tt_initialize(mvm, min_backoff); + iwl_mvm_thermal_initialize(mvm, min_backoff); if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; @@ -664,6 +665,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, out_unregister: ieee80211_unregister_hw(mvm->hw); iwl_mvm_leds_exit(mvm); + iwl_mvm_thermal_exit(mvm); out_free: flush_delayed_work(&mvm->fw_dump_wk); iwl_phy_db_free(mvm->phy_db); @@ -681,7 +683,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) iwl_mvm_leds_exit(mvm); - iwl_mvm_tt_exit(mvm); + iwl_mvm_thermal_exit(mvm); ieee80211_unregister_hw(mvm->hw); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 6ba391099d7e..466d169b0e62 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -65,6 +65,8 @@ * *****************************************************************************/ +#include + #include "mvm.h" #define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT HZ @@ -80,8 +82,10 @@ static void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm) IWL_ERR(mvm, "Enter CT Kill\n"); iwl_mvm_set_hw_ctkill_state(mvm, true); - tt->throttle = false; - tt->dynamic_smps = false; + if (!iwl_mvm_is_tt_in_fw(mvm)) { + tt->throttle = false; + tt->dynamic_smps = false; + } /* Don't schedule an exit work if we're in test mode, since * the temperature will not change unless we manually set it @@ -117,18 +121,21 @@ void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp) static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt) { - struct iwl_dts_measurement_notif *notif; + struct iwl_dts_measurement_notif_v1 *notif_v1; int len = 
iwl_rx_packet_payload_len(pkt); int temp; - if (WARN_ON_ONCE(len < sizeof(*notif))) { + /* we can use notif_v1 only, because v2 only adds an additional + * parameter, which is not used in this function. + */ + if (WARN_ON_ONCE(len < sizeof(*notif_v1))) { IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); return -EINVAL; } - notif = (void *)pkt->data; + notif_v1 = (void *)pkt->data; - temp = le32_to_cpu(notif->temp); + temp = le32_to_cpu(notif_v1->temp); /* shouldn't be negative, but since it's s32, make sure it isn't */ if (WARN_ON_ONCE(temp < 0)) @@ -159,17 +166,56 @@ static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait, void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_dts_measurement_notif_v2 *notif_v2; + int len = iwl_rx_packet_payload_len(pkt); int temp; + u32 ths_crossed; /* the notification is handled synchronously in ctkill, so skip here */ if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) return; temp = iwl_mvm_temp_notif_parse(mvm, pkt); - if (temp < 0) + + if (!iwl_mvm_is_tt_in_fw(mvm)) { + if (temp >= 0) + iwl_mvm_tt_temp_changed(mvm, temp); + return; + } + + if (WARN_ON_ONCE(len < sizeof(*notif_v2))) { + IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n"); + return; + } + + notif_v2 = (void *)pkt->data; + ths_crossed = le32_to_cpu(notif_v2->threshold_idx); + + /* 0xFF in ths_crossed means the notification is not related + * to a trip, so we can ignore it here. + */ + if (ths_crossed == 0xFF) + return; + + IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n", + temp, ths_crossed); + +#ifdef CONFIG_THERMAL + if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS)) return; - iwl_mvm_tt_temp_changed(mvm, temp); + /* + * We are now handling a temperature notification from the firmware + * in ASYNC and hold the mutex. thermal_notify_framework will call + * us back through get_temp() which ought to send a SYNC command to + * the firmware and hence to take the mutex. + * Avoid the deadlock by unlocking the mutex here. 
+ */ + mutex_unlock(&mvm->mutex); + thermal_notify_framework(mvm->tz_device.tzone, + mvm->tz_device.fw_trips_index[ths_crossed]); + mutex_lock(&mvm->mutex); +#endif /* CONFIG_THERMAL */ } void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) @@ -460,7 +506,220 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = { .support_tx_backoff = true, }; -void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) +#ifdef CONFIG_THERMAL +static int compare_temps(const void *a, const void *b) +{ + return ((s16)le16_to_cpu(*(__le16 *)a) - + (s16)le16_to_cpu(*(__le16 *)b)); +} + +int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) +{ + struct temp_report_ths_cmd cmd = {0}; + int ret, i, j, idx = 0; + + lockdep_assert_held(&mvm->mutex); + + /* The driver holds array of temperature trips that are unsorted + * and uncompressed, the FW should get it compressed and sorted + */ + + /* compress temp_trips to cmd array, remove uninitialized values*/ + for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) + if (mvm->tz_device.temp_trips[i] != S16_MIN) { + cmd.thresholds[idx++] = + cpu_to_le16(mvm->tz_device.temp_trips[i]); + } + cmd.num_temps = cpu_to_le32(idx); + + if (!idx) + goto send; + + /*sort cmd array*/ + sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL); + + /* we should save the indexes of trips because we sort + * and compress the orginal array + */ + for (i = 0; i < idx; i++) { + for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) { + if (le16_to_cpu(cmd.thresholds[i]) == + mvm->tz_device.temp_trips[j]) + mvm->tz_device.fw_trips_index[i] = j; + } + } + +send: + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, + TEMP_REPORTING_THRESHOLDS_CMD), + 0, sizeof(cmd), &cmd); + if (ret) + IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n", + ret); + + return ret; +} + +static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, + int *temperature) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + int ret; + int temp; + + mutex_lock(&mvm->mutex); + + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { + ret = -EIO; + goto out; + } + + ret = iwl_mvm_get_temp(mvm, &temp); + if (ret) + goto out; + + *temperature = temp * 1000; + +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device, + int trip, int *temp) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + + if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) + return -EINVAL; + + *temp = mvm->tz_device.temp_trips[trip] * 1000; + + return 0; +} + +static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device, + int trip, enum thermal_trip_type *type) +{ + if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) + return -EINVAL; + + *type = THERMAL_TRIP_PASSIVE; + + return 0; +} + +static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, + int trip, int temp) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata; + struct iwl_mvm_thermal_device *tzone; + int i, ret; + s16 temperature; + + mutex_lock(&mvm->mutex); + + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { + ret = -EIO; + goto out; + } + + if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) { + ret = -EINVAL; + goto out; + } + + if ((temp / 1000) > S16_MAX) { + ret = -EINVAL; + goto out; + } + + temperature = (s16)(temp / 1000); + tzone = &mvm->tz_device; + + if (!tzone) { + ret = -EIO; + goto out; + } + + /* no updates*/ + if (tzone->temp_trips[trip] == temperature) { + ret = 0; + goto out; + } + + /* 
already existing temperature */ + for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { + if (tzone->temp_trips[i] == temperature) { + ret = -EINVAL; + goto out; + } + } + + tzone->temp_trips[trip] = temperature; + + ret = iwl_mvm_send_temp_report_ths_cmd(mvm); +out: + mutex_unlock(&mvm->mutex); + return ret; +} + +static struct thermal_zone_device_ops tzone_ops = { + .get_temp = iwl_mvm_tzone_get_temp, + .get_trip_temp = iwl_mvm_tzone_get_trip_temp, + .get_trip_type = iwl_mvm_tzone_get_trip_type, + .set_trip_temp = iwl_mvm_tzone_set_trip_temp, +}; + +/* make all trips writable */ +#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1) + +static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) +{ + int i; + char name[] = "iwlwifi"; + + if (!iwl_mvm_is_tt_in_fw(mvm)) { + mvm->tz_device.tzone = NULL; + + return; + } + + BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + + mvm->tz_device.tzone = thermal_zone_device_register(name, + IWL_MAX_DTS_TRIPS, + IWL_WRITABLE_TRIPS_MSK, + mvm, &tzone_ops, + NULL, 0, 0); + if (IS_ERR(mvm->tz_device.tzone)) { + IWL_DEBUG_TEMP(mvm, + "Failed to register to thermal zone (err = %ld)\n", + PTR_ERR(mvm->tz_device.tzone)); + return; + } + + /* 0 is a valid temperature, + * so initialize the array with S16_MIN which invalid temperature + */ + for (i = 0 ; i < IWL_MAX_DTS_TRIPS; i++) + mvm->tz_device.temp_trips[i] = S16_MIN; +} + +static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) +{ + if (!iwl_mvm_is_tt_in_fw(mvm)) + return; + + if (mvm->tz_device.tzone) { + IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); + thermal_zone_device_unregister(mvm->tz_device.tzone); + mvm->tz_device.tzone = NULL; + } +} +#endif /* CONFIG_THERMAL */ + +void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) { struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle; @@ -475,10 +734,18 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff) tt->dynamic_smps = false; tt->min_backoff = min_backoff; INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); + +#ifdef CONFIG_THERMAL + iwl_mvm_thermal_zone_register(mvm); +#endif } -void iwl_mvm_tt_exit(struct iwl_mvm *mvm) +void iwl_mvm_thermal_exit(struct iwl_mvm *mvm) { cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit); IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); + +#ifdef CONFIG_THERMAL + iwl_mvm_thermal_zone_unregister(mvm); +#endif } -- cgit v1.2.3 From 5c89e7bc557e1dd5650946a0fe4940166bc62abf Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Tue, 5 Jan 2016 10:34:47 +0200 Subject: iwlwifi: mvm: add registration to cooling device Register cooling device in order to have the Thermal Manager handle the device's power budget according to the sent notifications. The interface adds a new thermal cooling device to /sys/class/thermal/ folder. 
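In outline, the cooling device plumbing this patch adds reduces to the sketch below. It is condensed from the tt.c hunks that follow: helper names are shortened and illustrative, locking, ucode-state and bounds checks are dropped, and only the first entries of the power-budget table are shown.

static const u32 budgets_mw[] = { 2000, 1800, 1600 /* ... down to 150 */ };

static int tcool_get_max_state(struct thermal_cooling_device *cdev,
                               unsigned long *state)
{
        /* the deepest cooling state maps to the smallest power budget */
        *state = ARRAY_SIZE(budgets_mw) - 1;
        return 0;
}

static int tcool_get_cur_state(struct thermal_cooling_device *cdev,
                               unsigned long *state)
{
        struct iwl_mvm *mvm = cdev->devdata;

        *state = mvm->cooling_dev.cur_state;
        return 0;
}

static int tcool_set_cur_state(struct thermal_cooling_device *cdev,
                               unsigned long new_state)
{
        struct iwl_mvm *mvm = cdev->devdata;

        /* translate the requested state into a budget and hand it to FW */
        return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
                                    budgets_mw[new_state]);
}

static struct thermal_cooling_device_ops tcool_ops = {
        .get_max_state = tcool_get_max_state,
        .get_cur_state = tcool_get_cur_state,
        .set_cur_state = tcool_set_cur_state,
};

static void cooling_register_sketch(struct iwl_mvm *mvm)
{
        char name[] = "iwlwifi";

        /* the returned cdev is what shows up under /sys/class/thermal/ */
        mvm->cooling_dev.cdev = thermal_cooling_device_register(name, mvm,
                                                                &tcool_ops);
}

The Thermal Manager drives set_cur_state(); on success iwl_mvm_ctdp_command() records the new budget in cooling_dev.cur_state, so get_cur_state() simply reports it back.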
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 26 ++++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 5 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 19 +++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 1 + drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 147 +++++++++++++++++++++++ 6 files changed, 200 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 724a3eef78b1..63dc109605e3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -321,6 +321,7 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting + * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -356,6 +357,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = (__force iwl_ucode_tlv_capa_t)73, IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, + IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index e692098a9f1e..d5f90371054e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -279,6 +279,7 @@ enum { */ enum iwl_phy_ops_subcmd_ids { CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0, + CTDP_CONFIG_CMD = 0x03, TEMP_REPORTING_THRESHOLDS_CMD = 0x04, CT_KILL_NOTIFICATION = 0xFE, DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, @@ -1711,6 +1712,31 @@ struct ct_kill_notif { __le16 reserved; } __packed; /* GRP_PHY_CT_KILL_NTF */ +/** +* enum ctdp_cmd_operation - CTDP command operations +* @CTDP_CMD_OPERATION_START: update the current budget +* @CTDP_CMD_OPERATION_STOP: stop ctdp +* @CTDP_CMD_OPERATION_REPORT: get the avgerage budget +*/ +enum iwl_mvm_ctdp_cmd_operation { + CTDP_CMD_OPERATION_START = 0x1, + CTDP_CMD_OPERATION_STOP = 0x2, + CTDP_CMD_OPERATION_REPORT = 0x4, +};/* CTDP_CMD_OPERATION_TYPE_E */ + +/** + * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget + * + * @operation: see &enum iwl_mvm_ctdp_cmd_operation + * @budget: the budget in milliwatt + * @window_size: defined in API but not used + */ +struct iwl_mvm_ctdp_cmd { + __le32 operation; + __le32 budget; + __le32 window_size; +} __packed; + #define IWL_MAX_DTS_TRIPS 8 /** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 07f2cbd9c8e7..3e596e4b3999 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -963,6 +963,11 @@ int iwl_mvm_up(struct iwl_mvm *mvm) /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); } + + /* TODO: read the budget from BIOS / Platform NVM */ + if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0) + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, + mvm->cooling_dev.cur_state); #else /* Initialize tx backoffs to the minimal possible */ iwl_mvm_tt_tx_backoff(mvm, 0); diff --git 
a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 87d3e2884886..d867cb478681 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -535,6 +535,16 @@ struct iwl_mvm_thermal_device { u8 fw_trips_index[IWL_MAX_DTS_TRIPS]; struct thermal_zone_device *tzone; }; + +/* + * iwl_mvm_cooling_device + * @cur_state: current state in milliwatts + * @cdev: struct thermal cooling device + */ +struct iwl_mvm_cooling_device { + u32 cur_state; + struct thermal_cooling_device *cdev; +}; #endif #define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8 @@ -819,6 +829,7 @@ struct iwl_mvm { struct iwl_mvm_tt_mgmt thermal_throttle; #ifdef CONFIG_THERMAL struct iwl_mvm_thermal_device tz_device; + struct iwl_mvm_cooling_device cooling_dev; #endif s32 temperature; /* Celsius */ @@ -1068,6 +1079,12 @@ static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm) #endif /* CONFIG_THERMAL */ } +static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm) +{ + return fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_CTDP_SUPPORT); +} + extern const u8 iwl_mvm_ac_to_tx_fifo[]; struct iwl_rate_info { @@ -1544,6 +1561,8 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); +int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm); +int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); /* Location Aware Regulatory */ struct iwl_mcc_update_resp * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index a7acadd446c4..ec3604678a56 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -389,6 +389,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE), + HCMD_NAME(CTDP_CONFIG_CMD), HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD), HCMD_NAME(CT_KILL_NOTIFICATION), HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 466d169b0e62..999bcb898be8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -706,6 +706,139 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) mvm->tz_device.temp_trips[i] = S16_MIN; } +static const u32 iwl_mvm_cdev_budgets[] = { + 2000, /* cooling state 0 */ + 1800, /* cooling state 1 */ + 1600, /* cooling state 2 */ + 1400, /* cooling state 3 */ + 1200, /* cooling state 4 */ + 1000, /* cooling state 5 */ + 900, /* cooling state 6 */ + 800, /* cooling state 7 */ + 700, /* cooling state 8 */ + 650, /* cooling state 9 */ + 600, /* cooling state 10 */ + 550, /* cooling state 11 */ + 500, /* cooling state 12 */ + 450, /* cooling state 13 */ + 400, /* cooling state 14 */ + 350, /* cooling state 15 */ + 300, /* cooling state 16 */ + 250, /* cooling state 17 */ + 200, /* cooling state 18 */ + 150, /* cooling state 19 */ +}; + +int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget) +{ + struct iwl_mvm_ctdp_cmd cmd = { + .operation = cpu_to_le32(op), + .budget = cpu_to_le32(budget), + .window_size = 0, + }; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + ret = 
iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, + CTDP_CONFIG_CMD), + sizeof(cmd), &cmd, &status); + + if (ret) { + IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret); + return ret; + } + + if (op == CTDP_CMD_OPERATION_START) + mvm->cooling_dev.cur_state = budget; + + else if (op == CTDP_CMD_OPERATION_REPORT) + IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status); + + return 0; +} + +static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1; + + return 0; +} + +static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); + + if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) + return -EBUSY; + + *state = mvm->cooling_dev.cur_state; + return 0; +} + +static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long new_state) +{ + struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); + int ret; + + if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) + return -EIO; + + if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) + return -EBUSY; + + mutex_lock(&mvm->mutex); + + if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { + ret = -EINVAL; + goto unlock; + } + + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, + iwl_mvm_cdev_budgets[new_state]); + +unlock: + mutex_unlock(&mvm->mutex); + return ret; +} + +static struct thermal_cooling_device_ops tcooling_ops = { + .get_max_state = iwl_mvm_tcool_get_max_state, + .get_cur_state = iwl_mvm_tcool_get_cur_state, + .set_cur_state = iwl_mvm_tcool_set_cur_state, +}; + +int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) +{ + char name[] = "iwlwifi"; + + if (!iwl_mvm_is_ctdp_supported(mvm)) { + mvm->cooling_dev.cdev = NULL; + + return 0; + } + + BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); + + mvm->cooling_dev.cdev = + thermal_cooling_device_register(name, + mvm, + &tcooling_ops); + + if (IS_ERR(mvm->cooling_dev.cdev)) { + IWL_DEBUG_TEMP(mvm, + "Failed to register to cooling device (err = %ld)\n", + PTR_ERR(mvm->cooling_dev.cdev)); + return PTR_ERR(mvm->cooling_dev.cdev); + } + + return 0; +} + static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) { if (!iwl_mvm_is_tt_in_fw(mvm)) @@ -717,6 +850,18 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) mvm->tz_device.tzone = NULL; } } + +static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) +{ + if (!iwl_mvm_is_ctdp_supported(mvm)) + return; + + if (mvm->cooling_dev.cdev) { + IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); + thermal_cooling_device_unregister(mvm->cooling_dev.cdev); + mvm->cooling_dev.cdev = NULL; + } +} #endif /* CONFIG_THERMAL */ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) @@ -736,6 +881,7 @@ void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff) INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill); #ifdef CONFIG_THERMAL + iwl_mvm_cooling_device_register(mvm); iwl_mvm_thermal_zone_register(mvm); #endif } @@ -746,6 +892,7 @@ void iwl_mvm_thermal_exit(struct iwl_mvm *mvm) IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n"); #ifdef CONFIG_THERMAL + iwl_mvm_cooling_device_unregister(mvm); iwl_mvm_thermal_zone_unregister(mvm); #endif } -- cgit v1.2.3 From 25c2b22caceedf015d5852e3abb46d2e9271a3bd Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 7 Feb 2016 13:09:59 +0200 Subject: iwlwifi: mvm: set the correct descriptor size for 
tracing The 9000 series uses a different-sized descriptor. Update the relevant tracing field. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ec3604678a56..3661a9db5a0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -479,8 +479,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; + trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc); } else { op_mode->ops = &iwl_mvm_ops; + trans->rx_mpdu_cmd_hdr_size = + sizeof(struct iwl_rx_mpdu_res_start); if (WARN_ON(trans->num_rx_queues > 1)) goto out_free; @@ -572,7 +575,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_trans_configure(mvm->trans, &trans_cfg); trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start); trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv; trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num; memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, -- cgit v1.2.3 From dd4d3161d0f229031914f403721ced8cb64d4d52 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 7 Feb 2016 12:50:35 +0200 Subject: iwlwifi: mvm: fix RSS key sizing The initialization and copying of the RSS secret key should not use ARRAY_SIZE as we need to initialize a dword array, and not a byte array. Also fix the hook's maximum write size to allow writing a longer table - up to the full indirection table size. Fixes: 43413a975d06("iwlwifi: mvm: support rss queues configuration command") Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 5 +++-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2 +- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index c529e5355803..54d0d7d2d0b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -976,7 +976,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm, memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table, ARRAY_SIZE(cmd.indirection_table) % nbytes); - memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); + memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); mutex_lock(&mvm->mutex); ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); @@ -1497,7 +1497,8 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); -MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16); +MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, + (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); #ifdef CONFIG_IWLWIFI_BCAST_FILTERING MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 3e596e4b3999..05475a2bff90 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -121,7 +121,7 @@ static int 
iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++) cmd.indirection_table[i] = i % mvm->trans->num_rx_queues; - memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key)); + memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key)); return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 3661a9db5a0b..c5b6e8671169 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -661,7 +661,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); /* init RSS hash key */ - get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key)); + get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key)); return op_mode; -- cgit v1.2.3 From a339e918daf281a0eb78783abc954c1e97af16c4 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 2 Feb 2016 22:58:46 +0200 Subject: iwlwifi: mvm: handle pass all scan reporting The firmware doesn't send match found notifications when no matchsets are passed. This makes sense because if there are no matchsets, nothing can be matched. But the nl80211 API should report when there are results available, even if no matchsets were passed. To handle this, we can use the firmware's ITERATION_COMPLETE reporting, which will send us notifications every time it completed a scheduled scan iteration. Then we can set a flag when we received beacons and use that to report that results are available. Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 7 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 6 +++++ drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 36 +++++++++++++++++++++++---- 3 files changed, 44 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d867cb478681..bb811a29490d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -493,6 +493,12 @@ enum iwl_mvm_scan_type { IWL_SCAN_TYPE_FRAGMENTED, }; +enum iwl_mvm_sched_scan_pass_all_states { + SCHED_SCAN_PASS_ALL_DISABLED, + SCHED_SCAN_PASS_ALL_ENABLED, + SCHED_SCAN_PASS_ALL_FOUND, +}; + /** * struct iwl_nvm_section - describes an NVM section in memory. 
* @@ -687,6 +693,7 @@ struct iwl_mvm { void *scan_cmd; struct iwl_mcast_filter_cmd *mcast_filter_cmd; enum iwl_mvm_scan_type scan_type; + enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all; /* max number of simultaneous scans the FW supports */ unsigned int max_scans; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 145ec68ce6f9..055a8b0f7ad7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -448,6 +448,12 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, iwl_mvm_update_frame_stats(mvm, rate_n_flags, rx_status->flag & RX_FLAG_AMPDU_DETAILS); #endif + + if (unlikely((ieee80211_is_beacon(hdr->frame_control) || + ieee80211_is_probe_resp(hdr->frame_control)) && + mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)) + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND; + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status, crypt_len, rxb); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index aa6d8074f63a..09eb72c4ae43 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -297,6 +299,12 @@ void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); + + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { + IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); + ieee80211_sched_scan_results(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; + } } void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm, @@ -380,6 +388,7 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm, mvm->scan_status &= ~IWL_MVM_SCAN_SCHED; ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) { IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n", aborted ? 
"aborted" : "completed", @@ -533,10 +542,13 @@ static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, IWL_DEBUG_SCAN(mvm, "Sending scheduled scan with filtering, n_match_sets %d\n", req->n_match_sets); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; return false; } IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n"); + + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; return true; } @@ -788,6 +800,9 @@ static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm, flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; #endif + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) + flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE; + if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && params->type != IWL_SCAN_TYPE_FRAGMENTED) @@ -1074,6 +1089,9 @@ static u32 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm, flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; #endif + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED) + flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE; + if (iwl_mvm_is_regular_scan(params) && vif->type != NL80211_IFTYPE_P2P_DEVICE && params->type != IWL_SCAN_TYPE_FRAGMENTED) @@ -1301,10 +1319,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm, return -EBUSY; } - /* we don't support "match all" in the firmware */ - if (!req->n_match_sets) - return -EOPNOTSUPP; - ret = iwl_mvm_check_running_scans(mvm, type); if (ret) return ret; @@ -1400,6 +1414,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN); } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } mvm->scan_status &= ~mvm->scan_uid_status[uid]; @@ -1434,6 +1449,12 @@ void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, iwl_mvm_dump_channel_list(notif->results, notif->scanned_channels, buf, sizeof(buf))); + + if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) { + IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n"); + ieee80211_sched_scan_results(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED; + } } static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type) @@ -1528,6 +1549,7 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED); if (uid >= 0 && !mvm->restart_fw) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; mvm->scan_uid_status[uid] = 0; } @@ -1549,8 +1571,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm) * restart_hw, so do not report if FW is about to be * restarted. */ - if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && !mvm->restart_fw) + if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) && + !mvm->restart_fw) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; + } } } @@ -1586,6 +1611,7 @@ out: ieee80211_scan_completed(mvm->hw, true); } else if (notify) { ieee80211_sched_scan_stopped(mvm->hw); + mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED; } return ret; -- cgit v1.2.3 From 4494541c0c918688dfbbaf760548846a9c1bcf3f Mon Sep 17 00:00:00 2001 From: Eyal Shapira Date: Mon, 1 Feb 2016 09:07:05 +0200 Subject: iwlwifi: mvm: rs: fix a theoretical access to uninitialized array elements Klocwork is unhappy as ht_vht_rates might be accessed with rate->index being set to values between 0 and 3 which will lead to accessing uninitialized array elements. 
Effectively this doesn't happen as in HT/VHT we're not using these rate indices. Still fix this. Signed-off-by: Eyal Shapira Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 6e7e78a37879..3dc94d2e8a65 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -556,6 +556,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate) if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX)) rate_str = legacy_rates[rate->index]; else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) && + (rate->index >= IWL_RATE_MCS_0_INDEX) && (rate->index <= IWL_RATE_MCS_9_INDEX)) rate_str = ht_vht_rates[rate->index]; else -- cgit v1.2.3 From 5a51c03fa8f61b4c7554bad97426c87a93e4606b Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 23 Feb 2016 21:53:12 +0200 Subject: iwlwifi: mvm: bump firmware API to 21 The driver is now ready to handle this firmware. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index fa41a5e1c890..fc475ce59b47 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -73,8 +73,8 @@ /* Highest firmware API version supported */ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 -#define IWL7265D_UCODE_API_MAX 20 -#define IWL3168_UCODE_API_MAX 20 +#define IWL7265D_UCODE_API_MAX 21 +#define IWL3168_UCODE_API_MAX 21 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index bce9b3420a13..ad8c588e3eed 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -70,8 +70,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 20 -#define IWL8265_UCODE_API_MAX 20 +#define IWL8000_UCODE_API_MAX 21 +#define IWL8265_UCODE_API_MAX 21 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 13 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 4b93404f46a7..083590698e72 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -55,7 +55,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 20 +#define IWL9000_UCODE_API_MAX 21 /* Oldest version we won't warn about */ #define IWL9000_UCODE_API_OK 13 -- cgit v1.2.3 From bac842da5b39f8f9521a8cb8d0b11e533424e90e Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 31 Jan 2016 09:29:39 +0200 Subject: iwlwifi: pcie: aggregate Flow Handler configuration writes Instead of waking up the device each time we write a register, wake it up once, and write all the registers at once. 
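Condensed from the trans.c hunk that follows, the new firmware-chunk load path is: grab NIC access once, program every Flow Handler register with a plain iwl_write32(), then release access. The sketch below keeps only the first two writes and uses an illustrative function name.

static int load_fw_chunk_sketch(struct iwl_trans *trans, u32 dst_addr,
                                dma_addr_t phy_addr, u32 byte_cnt)
{
        unsigned long flags;

        /* wake the NIC once instead of once per register write */
        if (!iwl_trans_grab_nic_access(trans, &flags))
                return -EIO;

        iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
                    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
        iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
        /* ... remaining FH_TFDIB/FH_TCSR writes (phy_addr, byte_cnt) as in the diff ... */

        iwl_trans_release_nic_access(trans, &flags);
        return 0;
}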
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 56 ++++++++++++------------- 1 file changed, 28 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 58591ca051fd..cda678dfad05 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -616,38 +616,38 @@ static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr, dma_addr_t phy_addr, u32 byte_cnt) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; int ret; trans_pcie->ucode_write_complete = false; - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); - - iwl_write_direct32(trans, - FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), - dst_addr); - - iwl_write_direct32(trans, - FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), - phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); - - iwl_write_direct32(trans, - FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), - (iwl_get_dma_hi_addr(phy_addr) - << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); - - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM | - 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX | - FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); - - iwl_write_direct32(trans, - FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | - FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | - FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); + if (!iwl_trans_grab_nic_access(trans, &flags)) + return -EIO; + + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE); + + iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), + dst_addr); + + iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL), + phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK); + + iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL), + (iwl_get_dma_hi_addr(phy_addr) + << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt); + + iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL), + BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) | + BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) | + FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID); + + iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL), + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | + FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE | + FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD); + + iwl_trans_release_nic_access(trans, &flags); ret = wait_event_timeout(trans_pcie->ucode_write_waitq, trans_pcie->ucode_write_complete, 5 * HZ); -- cgit v1.2.3 From 2e5d4a8f61dcae12996bbe2bcc446d3b47625cac Mon Sep 17 00:00:00 2001 From: Haim Dreyfuss Date: Thu, 17 Dec 2015 12:17:58 +0200 Subject: iwlwifi: pcie: Add new configuration to enable MSIX Working with MSIX requires prior configuration. This includes requesting interrupt vectors from the OS, registering the vectors and mapping the optional causes to the relevant interrupt. In addition add new interrupt handler to handle MSIX interrupt. 
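The MSI-X vector setup reduces to the sketch below, condensed from the iwl_pcie_set_interrupt_capa() hunk further down; the function name is illustrative, and the MSI fallback and per-vector request_threaded_irq() registration are only summarized in the trailing comment.

static void msix_setup_sketch(struct iwl_trans_pcie *trans_pcie,
                              struct pci_dev *pdev)
{
        int i, nvec, max_vec;

        /* one vector per RX queue plus one "default" vector for the rest */
        max_vec = min_t(u32, num_possible_cpus() + 1, IWL_MAX_RX_HW_QUEUES);
        for (i = 0; i < max_vec; i++)
                trans_pcie->msix_entries[i].entry = i;

        nvec = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
                                     MSIX_MIN_INTERRUPT_VECTORS, max_vec);
        if (nvec < MSIX_MIN_INTERRUPT_VECTORS)
                return; /* fall back to MSI */

        trans_pcie->allocated_vector = nvec;
        trans_pcie->default_irq_num = nvec - 1; /* last vector: non-RX causes */
        trans_pcie->msix_enabled = true;

        /*
         * Each vector is then hooked up with request_threaded_irq(), with
         * its msix_entry as dev_id: the RX vectors run
         * iwl_pcie_irq_rx_msix_handler() and the default vector runs
         * iwl_pcie_irq_msix_handler() (both added in pcie/rx.c below).
         */
}

Once the vectors are allocated, iwl_pcie_init_msix() programs the IVAR table so that each RX cause goes to its own vector and every cause in causes_list goes to the default vector.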
Signed-off-by: Haim Dreyfuss Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 49 +++++ drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 2 + drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 100 +++++++-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 177 +++++++++++++++- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 229 ++++++++++++++++++--- 5 files changed, 507 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 163b21bc20cb..a79c4f61a851 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -7,6 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -549,4 +550,52 @@ enum dtd_diode_reg { DTS_DIODE_REG_FLAGS_PASS_ONCE = 0x00000080, /* bits [7:7] */ }; +/***************************************************************************** + * MSIX related registers * + *****************************************************************************/ + +#define CSR_MSIX_BASE (0x2000) +#define CSR_MSIX_FH_INT_CAUSES_AD (CSR_MSIX_BASE + 0x800) +#define CSR_MSIX_FH_INT_MASK_AD (CSR_MSIX_BASE + 0x804) +#define CSR_MSIX_HW_INT_CAUSES_AD (CSR_MSIX_BASE + 0x808) +#define CSR_MSIX_HW_INT_MASK_AD (CSR_MSIX_BASE + 0x80C) +#define CSR_MSIX_AUTOMASK_ST_AD (CSR_MSIX_BASE + 0x810) +#define CSR_MSIX_RX_IVAR_AD_REG (CSR_MSIX_BASE + 0x880) +#define CSR_MSIX_IVAR_AD_REG (CSR_MSIX_BASE + 0x890) +#define CSR_MSIX_PENDING_PBA_AD (CSR_MSIX_BASE + 0x1000) +#define CSR_MSIX_RX_IVAR(cause) (CSR_MSIX_RX_IVAR_AD_REG + (cause)) +#define CSR_MSIX_IVAR(cause) (CSR_MSIX_IVAR_AD_REG + (cause)) + +#define MSIX_FH_INT_CAUSES_Q(q) (q) + +/* + * Causes for the FH register interrupts + */ +enum msix_fh_int_causes { + MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16), + MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17), + MSIX_FH_INT_CAUSES_S2D = BIT(19), + MSIX_FH_INT_CAUSES_FH_ERR = BIT(21), +}; + +/* + * Causes for the HW register interrupts + */ +enum msix_hw_int_causes { + MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), + MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), + MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), + MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), + MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), + MSIX_HW_INT_CAUSES_REG_SW_ERR = BIT(25), + MSIX_HW_INT_CAUSES_REG_SCD = BIT(26), + MSIX_HW_INT_CAUSES_REG_FH_TX = BIT(27), + MSIX_HW_INT_CAUSES_REG_HW_ERR = BIT(29), + MSIX_HW_INT_CAUSES_REG_HAP = BIT(30), +}; + +#define MSIX_MIN_INTERRUPT_VECTORS 2 +#define MSIX_AUTO_CLEAR_CAUSE 0 +#define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) + #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 5bde23a472b4..c46e596e12b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -404,4 +404,6 @@ enum { LMPM_PAGE_PASS_NOTIF_POS = BIT(20), }; +#define UREG_CHICK (0xA05C00) +#define UREG_CHICK_MSIX_ENABLE BIT(25) #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 542bbc5e2b24..6677f3122226 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ 
b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -336,6 +336,14 @@ struct iwl_tso_hdr_page { * @fw_mon_phys: physical address of the buffer for the firmware monitor * @fw_mon_page: points to the first page of the buffer for the firmware monitor * @fw_mon_size: size of the buffer for the firmware monitor + * @msix_entries: array of MSI-X entries + * @msix_enabled: true if managed to enable MSI-X + * @allocated_vector: the number of interrupt vector allocated by the OS + * @default_irq_num: default irq for non rx interrupt + * @fh_init_mask: initial unmasked fh causes + * @hw_init_mask: initial unmasked hw causes + * @fh_mask: current unmasked fh causes + * @hw_mask: current unmasked hw causes */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -402,6 +410,15 @@ struct iwl_trans_pcie { dma_addr_t fw_mon_phys; struct page *fw_mon_page; u32 fw_mon_size; + + struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; + bool msix_enabled; + u32 allocated_vector; + u32 default_irq_num; + u32 fh_init_mask; + u32 hw_init_mask; + u32 fh_mask; + u32 hw_mask; }; static inline struct iwl_trans_pcie * @@ -430,7 +447,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans); * RX ******************************************************/ int iwl_pcie_rx_init(struct iwl_trans *trans); +irqreturn_t iwl_pcie_msix_isr(int irq, void *data); irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id); +irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); +irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); @@ -485,15 +505,24 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans); ******************************************************/ static inline void iwl_disable_interrupts(struct iwl_trans *trans) { - clear_bit(STATUS_INT_ENABLED, &trans->status); - - /* disable interrupts from uCode/NIC to host */ - iwl_write32(trans, CSR_INT_MASK, 0x00000000); + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - /* acknowledge/clear/reset any interrupts still pending - * from uCode or flow handler (Rx/Tx DMA) */ - iwl_write32(trans, CSR_INT, 0xffffffff); - iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); + clear_bit(STATUS_INT_ENABLED, &trans->status); + if (!trans_pcie->msix_enabled) { + /* disable interrupts from uCode/NIC to host */ + iwl_write32(trans, CSR_INT_MASK, 0x00000000); + + /* acknowledge/clear/reset any interrupts still pending + * from uCode or flow handler (Rx/Tx DMA) */ + iwl_write32(trans, CSR_INT, 0xffffffff); + iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff); + } else { + /* disable all the interrupt we might use */ + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + trans_pcie->fh_init_mask); + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + trans_pcie->hw_init_mask); + } IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); } @@ -503,8 +532,37 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans) IWL_DEBUG_ISR(trans, "Enabling interrupts\n"); set_bit(STATUS_INT_ENABLED, &trans->status); - trans_pcie->inta_mask = CSR_INI_SET_MASK; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INI_SET_MASK; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + /* + * fh/hw_mask keeps all the unmasked causes. + * Unlike msi, in msix cause is enabled when it is unset. 
+ */ + trans_pcie->hw_mask = trans_pcie->hw_init_mask; + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + ~trans_pcie->fh_mask); + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + ~trans_pcie->hw_mask); + } +} + +static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk); + trans_pcie->hw_mask = msk; +} + +static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk); + trans_pcie->fh_mask = msk; } static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) @@ -512,8 +570,15 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n"); - trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INT_BIT_FH_TX; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, + trans_pcie->hw_init_mask); + iwl_enable_fh_int_msk_msix(trans, + MSIX_FH_INT_CAUSES_D2S_CH0_NUM); + } } static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) @@ -521,8 +586,15 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n"); - trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; - iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + if (!trans_pcie->msix_enabled) { + trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL; + iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); + } else { + iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, + trans_pcie->fh_init_mask); + iwl_enable_hw_int_msk_msix(trans, + MSIX_HW_INT_CAUSES_REG_RF_KILL); + } } static inline void iwl_wake_queue(struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 07973ef826c1..c0db681d66d1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1135,10 +1135,10 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ -static void iwl_pcie_rx_handle(struct iwl_trans *trans) +static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq[0]; + struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; u32 r, i, j, count = 0; bool emergency = false; @@ -1259,6 +1259,51 @@ restart: napi_gro_flush(&rxq->napi, false); } +static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) +{ + u8 queue = entry->entry; + struct msix_entry *entries = entry - queue; + + return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); +} + +static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, + struct msix_entry *entry) +{ + /* + * Before sending the interrupt the HW disables it to prevent + * a nested interrupt. This is done by writing 1 to the corresponding + * bit in the mask register. 
After handling the interrupt, it should be + * re-enabled by clearing this bit. This register is defined as + * write 1 clear (W1C) register, meaning that it's being clear + * by writing 1 to the bit. + */ + iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); +} + +/* + * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw + * This interrupt handler should be used with RSS queue only. + */ +irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); + struct iwl_trans *trans = trans_pcie->trans; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + local_bh_disable(); + iwl_pcie_rx_handle(trans, entry->entry); + local_bh_enable(); + + iwl_pcie_clear_irq(trans, entry); + + lock_map_release(&trans->sync_cmd_lockdep_map); + + return IRQ_HANDLED; +} + /* * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card */ @@ -1589,7 +1634,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id) isr_stats->rx++; local_bh_disable(); - iwl_pcie_rx_handle(trans); + iwl_pcie_rx_handle(trans, 0); local_bh_enable(); } @@ -1732,3 +1777,129 @@ irqreturn_t iwl_pcie_isr(int irq, void *data) return IRQ_WAKE_THREAD; } + +irqreturn_t iwl_pcie_msix_isr(int irq, void *data) +{ + return IRQ_WAKE_THREAD; +} + +irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) +{ + struct msix_entry *entry = dev_id; + struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); + struct iwl_trans *trans = trans_pcie->trans; + struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats; + u32 inta_fh, inta_hw; + + lock_map_acquire(&trans->sync_cmd_lockdep_map); + + spin_lock(&trans_pcie->irq_lock); + inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD); + inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD); + /* + * Clear causes registers to avoid being handling the same cause. + */ + iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh); + iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw); + spin_unlock(&trans_pcie->irq_lock); + + if (unlikely(!(inta_fh | inta_hw))) { + IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n"); + lock_map_release(&trans->sync_cmd_lockdep_map); + return IRQ_NONE; + } + + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n", + inta_fh, + iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD)); + + /* This "Tx" DMA channel is used only for loading uCode */ + if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) { + IWL_DEBUG_ISR(trans, "uCode load interrupt\n"); + isr_stats->tx++; + /* + * Wake up uCode load routine, + * now that load is complete + */ + trans_pcie->ucode_write_complete = true; + wake_up(&trans_pcie->ucode_write_waitq); + } + + /* Error detected by uCode */ + if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || + (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { + IWL_ERR(trans, + "Microcode SW error detected. 
Restarting 0x%X.\n", + inta_fh); + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + } + + /* After checking FH register check HW register */ + if (iwl_have_debug_level(IWL_DL_ISR)) + IWL_DEBUG_ISR(trans, + "ISR inta_hw 0x%08x, enabled 0x%08x\n", + inta_hw, + iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD)); + + /* Alive notification via Rx interrupt will do the real work */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) { + IWL_DEBUG_ISR(trans, "Alive interrupt\n"); + isr_stats->alive++; + } + + /* uCode wakes up after power-down sleep */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); + iwl_pcie_rxq_check_wrptr(trans); + iwl_pcie_txq_check_wrptrs(trans); + + isr_stats->wakeup++; + } + + /* Chip got too hot and stopped itself */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) { + IWL_ERR(trans, "Microcode CT kill error detected.\n"); + isr_stats->ctkill++; + } + + /* HW RF KILL switch toggled */ + if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL) { + bool hw_rfkill; + + hw_rfkill = iwl_is_rfkill_set(trans); + IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", + hw_rfkill ? "disable radio" : "enable radio"); + + isr_stats->rfkill++; + + mutex_lock(&trans_pcie->mutex); + iwl_trans_pcie_rf_kill(trans, hw_rfkill); + mutex_unlock(&trans_pcie->mutex); + if (hw_rfkill) { + set_bit(STATUS_RFKILL, &trans->status); + if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status)) + IWL_DEBUG_RF_KILL(trans, + "Rfkill while SYNC HCMD in flight\n"); + wake_up(&trans_pcie->wait_command_queue); + } else { + clear_bit(STATUS_RFKILL, &trans->status); + } + } + + if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) { + IWL_ERR(trans, + "Hardware error detected. Restarting.\n"); + + isr_stats->hw++; + iwl_pcie_irq_handle_error(trans); + } + + iwl_pcie_clear_irq(trans, entry); + + lock_map_release(&trans->sync_cmd_lockdep_map); + + return IRQ_HANDLED; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index cda678dfad05..102d17564178 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1123,6 +1123,20 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) iwl_pcie_prepare_card_hw(trans); } +static void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans_pcie->msix_enabled) { + int i; + + for (i = 0; i < trans_pcie->allocated_vector; i++) + synchronize_irq(trans_pcie->msix_entries[i].vector); + } else { + synchronize_irq(trans_pcie->pci_dev->irq); + } +} + static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill) { @@ -1149,7 +1163,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, iwl_disable_interrupts(trans); /* Make sure it finished running */ - synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); mutex_lock(&trans_pcie->mutex); @@ -1252,8 +1266,6 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state) static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - if (!reset) { /* Enable persistence mode to avoid reset */ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, @@ -1271,7 +1283,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, iwl_pcie_disable_ict(trans); - synchronize_irq(trans_pcie->pci_dev->irq); + 
iwl_pcie_synchronize_irqs(trans); iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); @@ -1350,6 +1362,153 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return 0; } +struct iwl_causes_list { + u32 cause_num; + u32 mask_reg; + u8 addr; +}; + +static struct iwl_causes_list causes_list[] = { + {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, + {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, + {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, + {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, + {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, + {MSIX_HW_INT_CAUSES_REG_WAKEUP, CSR_MSIX_HW_INT_MASK_AD, 0x11}, + {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, + {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, + {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, + {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29}, + {MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, + {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, + {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, + {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, +}; + +static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie) +{ + u32 val, max_rx_vector, i; + struct iwl_trans *trans = trans_pcie->trans; + + max_rx_vector = trans_pcie->allocated_vector - 1; + + if (!trans_pcie->msix_enabled) + return; + + iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE); + + /* + * Each cause from the list above and the RX causes is represented as + * a byte in the IVAR table. We access the first (N - 1) bytes and map + * them to the (N - 1) vectors so these vectors will be used as rx + * vectors. Then access all non rx causes and map them to the + * default queue (N'th queue). + */ + for (i = 0; i < max_rx_vector; i++) { + iwl_write8(trans, CSR_MSIX_RX_IVAR(i), MSIX_FH_INT_CAUSES_Q(i)); + iwl_clear_bit(trans, CSR_MSIX_FH_INT_MASK_AD, + BIT(MSIX_FH_INT_CAUSES_Q(i))); + } + + for (i = 0; i < ARRAY_SIZE(causes_list); i++) { + val = trans_pcie->default_irq_num | + MSIX_NON_AUTO_CLEAR_CAUSE; + iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); + iwl_clear_bit(trans, causes_list[i].mask_reg, + causes_list[i].cause_num); + } + trans_pcie->fh_init_mask = + ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD); + trans_pcie->fh_mask = trans_pcie->fh_init_mask; + trans_pcie->hw_init_mask = + ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD); + trans_pcie->hw_mask = trans_pcie->hw_init_mask; +} + +static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev, + struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 pci_cmd; + int max_vector; + int ret, i; + + if (trans->cfg->mq_rx_supported) { + max_vector = min_t(u32, (num_possible_cpus() + 1), + IWL_MAX_RX_HW_QUEUES); + for (i = 0; i < max_vector; i++) + trans_pcie->msix_entries[i].entry = i; + + ret = pci_enable_msix_range(pdev, trans_pcie->msix_entries, + MSIX_MIN_INTERRUPT_VECTORS, + max_vector); + if (ret > 1) { + IWL_DEBUG_INFO(trans, + "Enable MSI-X allocate %d interrupt vector\n", + ret); + trans_pcie->allocated_vector = ret; + trans_pcie->default_irq_num = + trans_pcie->allocated_vector - 1; + trans_pcie->trans->num_rx_queues = + trans_pcie->allocated_vector - 1; + trans_pcie->msix_enabled = true; + + return; + } + IWL_DEBUG_INFO(trans, + "ret = %d %s move to msi mode\n", ret, + (ret == 1) ? 
+ "can't allocate more than 1 interrupt vector" : + "failed to enable msi-x mode"); + pci_disable_msix(pdev); + } + + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); + /* enable rfkill interrupt: hw bug w/a */ + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + } + } +} + +static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, + struct iwl_trans_pcie *trans_pcie) +{ + int i, last_vector; + + last_vector = trans_pcie->trans->num_rx_queues; + + for (i = 0; i < trans_pcie->allocated_vector; i++) { + int ret; + + ret = request_threaded_irq(trans_pcie->msix_entries[i].vector, + iwl_pcie_msix_isr, + (i == last_vector) ? + iwl_pcie_irq_msix_handler : + iwl_pcie_irq_rx_msix_handler, + IRQF_SHARED, + DRV_NAME, + &trans_pcie->msix_entries[i]); + if (ret) { + int j; + + IWL_ERR(trans_pcie->trans, + "Error allocating IRQ %d\n", i); + for (j = 0; j < i; j++) + free_irq(trans_pcie->msix_entries[i].vector, + &trans_pcie->msix_entries[i]); + pci_disable_msix(pdev); + return ret; + } + } + + return 0; +} + static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1371,6 +1530,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) iwl_pcie_apm_init(trans); + iwl_pcie_init_msix(trans_pcie); /* From now on, the op_mode will be kept updated about RF kill state */ iwl_enable_rfkill_int(trans); @@ -1425,7 +1585,7 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) mutex_unlock(&trans_pcie->mutex); - synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); } static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) @@ -1506,15 +1666,25 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) /* TODO: check if this is really needed */ pm_runtime_disable(trans->dev); - synchronize_irq(trans_pcie->pci_dev->irq); + iwl_pcie_synchronize_irqs(trans); iwl_pcie_tx_free(trans); iwl_pcie_rx_free(trans); - free_irq(trans_pcie->pci_dev->irq, trans); - iwl_pcie_free_ict(trans); + if (trans_pcie->msix_enabled) { + for (i = 0; i < trans_pcie->allocated_vector; i++) + free_irq(trans_pcie->msix_entries[i].vector, + &trans_pcie->msix_entries[i]); + + pci_disable_msix(trans_pcie->pci_dev); + trans_pcie->msix_enabled = false; + } else { + free_irq(trans_pcie->pci_dev->irq, trans); - pci_disable_msi(trans_pcie->pci_dev); + iwl_pcie_free_ict(trans); + + pci_disable_msi(trans_pcie->pci_dev); + } iounmap(trans_pcie->hw_base); pci_release_regions(trans_pcie->pci_dev); pci_disable_device(trans_pcie->pci_dev); @@ -2615,7 +2785,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - u16 pci_cmd; int ret, addr_size; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), @@ -2698,17 +2867,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - ret = pci_enable_msi(pdev); - if (ret) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); - /* enable rfkill interrupt: hw bug w/a */ - pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); - if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { - pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; - pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); - } - } - trans->hw_rev = iwl_read32(trans, CSR_HW_REV); /* * 
In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have @@ -2760,6 +2918,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, } } + iwl_pcie_set_interrupt_capa(pdev, trans); trans->hw_id = (pdev->device << 16) + pdev->subsystem_device; snprintf(trans->hw_id_str, sizeof(trans->hw_id_str), "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device); @@ -2769,19 +2928,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, init_waitqueue_head(&trans_pcie->d0i3_waitq); - ret = iwl_pcie_alloc_ict(trans); - if (ret) - goto out_pci_disable_msi; - - ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, - iwl_pcie_irq_handler, - IRQF_SHARED, DRV_NAME, trans); - if (ret) { - IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); - goto out_free_ict; - } + if (trans_pcie->msix_enabled) { + if (iwl_pcie_init_msix_handler(pdev, trans_pcie)) + goto out_pci_release_regions; + } else { + ret = iwl_pcie_alloc_ict(trans); + if (ret) + goto out_pci_disable_msi; - trans_pcie->inta_mask = CSR_INI_SET_MASK; + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, + iwl_pcie_irq_handler, + IRQF_SHARED, DRV_NAME, trans); + if (ret) { + IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); + goto out_free_ict; + } + trans_pcie->inta_mask = CSR_INI_SET_MASK; + } #ifdef CONFIG_IWLWIFI_PCIE_RTPM trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3; -- cgit v1.2.3 From 60c0a88f2ded5947bdfebbd6d5e9b3925b90984e Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 7 Feb 2016 10:28:13 +0200 Subject: iwlwifi: pcie: fix identation in trans.c A curly brace was misplaced, fix this. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 102d17564178..e67957d6ac79 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); - } + } } ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); kfree(buf); -- cgit v1.2.3 From e48c947f030d1f5c46c684a6cc2db332bc4af33f Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 27 Jan 2016 18:59:48 +0200 Subject: iwlwifi: mvm: enable VHT MU-MIMO for supported hardware Incoming hardware will support VHT MU-MIMO. Declare this capability for relevant hardware. 
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 1 + drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 3 ++- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 ++ drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 3 +++ 4 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index ad8c588e3eed..97be104d1203 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c @@ -217,6 +217,7 @@ const struct iwl_cfg iwl8265_2ac_cfg = { .nvm_ver = IWL8000_NVM_VERSION, .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .vht_mu_mimo_supported = true, }; const struct iwl_cfg iwl4165_2ac_cfg = { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 083590698e72..8e32a57dda0f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -139,7 +139,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .smem_len = IWL9000_SMEM_LEN, \ .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ - .mq_rx_supported = true + .mq_rx_supported = true, \ + .vht_mu_mimo_supported = true const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index dad5570d6cc8..4f2b57e8bbc7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -312,6 +312,7 @@ struct iwl_pwr_tx_backoff { * @smem_offset: offset from which the SMEM begins * @smem_len: the length of SMEM * @mq_rx_supported: multi-queue rx support + * @vht_mu_mimo_supported: VHT MU-MIMO support * * We enable the driver to be backward compatible wrt. hardware features. * API differences in uCode shouldn't be handled here but through TLVs @@ -364,6 +365,7 @@ struct iwl_cfg { const struct iwl_tt_params *thermal_params; bool apmg_not_supported; bool mq_rx_supported; + bool vht_mu_mimo_supported; }; /* diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 50f4cc60cf3e..e84cb8d638a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -366,6 +366,9 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, max_ampdu_exponent << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; + if (cfg->vht_mu_mimo_supported) + vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE; + if (cfg->ht_params->ldpc) vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC; -- cgit v1.2.3 From a07a8f37023acbd7a1c8453ffe4e35a7fb173c28 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 15 Nov 2015 11:11:59 +0200 Subject: iwlwifi: mvm: update firmware of VHT MU-MIMO groups status on restart The firmware handles the VHT MU-MIMO group data on its own. However, on HW restart (and future sniffer mode) the driver shall update the firmware on the VHT MU-MIMO group membership status. 
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 18 +++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 28 +++++++++++++++++++++++ 2 files changed, 46 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index d5f90371054e..ca7fec71854f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -285,6 +285,10 @@ enum iwl_phy_ops_subcmd_ids { DTS_MEASUREMENT_NOTIF_WIDE = 0xFF, }; +enum iwl_data_path_subcmd_ids { + UPDATE_MU_GROUPS_CMD = 0x1, +}; + enum iwl_prot_offload_subcmd_ids { STORED_BEACON_NTF = 0xFF, }; @@ -294,6 +298,7 @@ enum { LEGACY_GROUP = 0x0, LONG_GROUP = 0x1, PHY_OPS_GROUP = 0x4, + DATA_PATH_GROUP = 0x5, PROT_OFFLOAD_GROUP = 0xb, }; @@ -1923,6 +1928,19 @@ struct iwl_shared_mem_cfg { __le32 page_buff_size; } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */ +/** + * VHT MU-MIMO group configuration + * + * @membership_status: a bitmap of MU groups + * @user_position:the position of station in a group. If the station is in the + * group then bits (group * 2) is the position -1 + */ +struct iwl_mu_group_mgmt_cmd { + __le32 reserved; + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ + #define MAX_STORED_BEACON_SIZE 600 /** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 53156810185d..f1ed90bc2740 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1762,6 +1762,22 @@ static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) } #endif +static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mu_group_mgmt_cmd cmd = {}; + + memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, + WLAN_MEMBERSHIP_LEN); + memcpy(cmd.user_position, vif->bss_conf.mu_group.position, + WLAN_USER_POSITION_LEN); + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + UPDATE_MU_GROUPS_CMD), + 0, sizeof(cmd), &cmd); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1870,6 +1886,18 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, vif->addr); } + /* + * The firmware tracks the MU-MIMO group on its own. + * However, on HW restart we should restore this data + */ + if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && + changes & BSS_CHANGED_MU_GROUPS) { + ret = iwl_mvm_update_mu_groups(mvm, vif); + if (ret) + IWL_ERR(mvm, + "failed to update VHT MU_MIMO groups\n"); + } + iwl_mvm_recalc_multicast(mvm); iwl_mvm_configure_bcast_filter(mvm); -- cgit v1.2.3 From a6d5e32f247cbd3e34c7f86effbf4b426a018c32 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 14 Oct 2015 16:28:52 +0300 Subject: iwlwifi: mvm: send large SKBs to the transport Now that PCIe knows how to create A-MSDUs, use this capability and prepare SKBs that are large enough to build an A-MSDU. Advertise TSO support towards the network stack and segment the packet with gso_size set to be the maximal A-MSDU length (after having taken the headers to be added into account) to make sure that the skb that is passed down to the transport are not longer than the maximal A-MSDU allowed. 
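To see the sizing arithmetic described above in isolation, here is a standalone sketch with made-up example values (they are not taken from any real station or skb); the driver itself performs this computation in iwl_mvm_tx_tso() in the patch below, using the peer's advertised max_amsdu_len and the headers of the actual GSO skb.

/*
 * Standalone sketch of the A-MSDU sizing arithmetic, with example values
 * that are NOT taken from any real station or skb. The real computation
 * lives in iwl_mvm_tx_tso() below.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mss = 1368;                 /* example TCP MSS of the GSO skb */
	unsigned int snap_ip_tcp = 8 + 20 + 32;  /* example SNAP + IP + TCP header bytes */
	unsigned int eth_hdr = 14;               /* A-MSDU subframe (802.3) header */
	unsigned int max_amsdu_len = 7935;       /* example peer A-MSDU limit */

	/* one subframe carries its header, the SNAP/IP/TCP headers and one MSS */
	unsigned int subf_len = eth_hdr + snap_ip_tcp + mss;
	/* subframes are padded to a 4-byte boundary (except the last one) */
	unsigned int pad = (4 - subf_len) & 0x3;
	/* A-MSDU size is N * subf_len + (N - 1) * pad; pick the largest N that fits */
	unsigned int num_subframes = (max_amsdu_len + pad) / (subf_len + pad);

	/*
	 * The segmentation trick: advertise num_subframes * mss as the gso_size
	 * so that every skb produced by skb_gso_segment() fits in one A-MSDU.
	 */
	printf("subf_len=%u pad=%u num_subframes=%u gso_size=%u\n",
	       subf_len, pad, num_subframes, num_subframes * mss);
	return 0;
}

With these example numbers the sketch yields five subframes per A-MSDU, so the stack is asked to cut the TCP stream into chunks of five MSS each rather than one.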
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 148 ++++++++++++++++++++++++++-- 1 file changed, 140 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 4fbaadda4e99..6f67de5a2858 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -65,6 +65,7 @@ #include #include #include +#include #include "iwl-trans.h" #include "iwl-eeprom-parse.h" @@ -182,7 +183,8 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_cmd->tx_flags = cpu_to_le32(tx_flags); /* Total # bytes to be transmitted */ - tx_cmd->len = cpu_to_le16((u16)skb->len); + tx_cmd->len = cpu_to_le16((u16)skb->len + + (uintptr_t)info->driver_data[0]); tx_cmd->next_frame_len = 0; tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); tx_cmd->sta_id = sta_id; @@ -372,6 +374,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) info->hw_queue != info->control.vif->cab_queue))) return -1; + /* This holds the amsdu headers length */ + info->driver_data[0] = (void *)(uintptr_t)0; + /* * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel @@ -428,33 +433,156 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return 0; } -static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb_gso, +#ifdef CONFIG_INET +static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int mss = skb_shinfo(skb)->gso_size; struct sk_buff *tmp, *next; - char cb[sizeof(skb_gso->cb)]; + char cb[sizeof(skb->cb)]; + unsigned int num_subframes, tcp_payload_len, subf_len; + bool ipv4 = (skb->protocol == htons(ETH_P_IP)); + u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; + u16 amsdu_add, snap_ip_tcp, pad, i = 0; + + snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + + tcp_hdrlen(skb); + + if (!sta->max_amsdu_len || + !ieee80211_is_data_qos(hdr->frame_control)) { + num_subframes = 1; + pad = 0; + goto segment; + } + + /* TODO: for now, disable A-MSDU inside AMPDU */ + if (info->flags & IEEE80211_TX_CTL_AMPDU) { + num_subframes = 1; + pad = 0; + goto segment; + } + + /* Sub frame header + SNAP + IP header + TCP header + MSS */ + subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; + pad = (4 - subf_len) & 0x3; + + /* + * If we have N subframes in the A-MSDU, then the A-MSDU's size is + * N * subf_len + (N - 1) * pad. + */ + num_subframes = (sta->max_amsdu_len + pad) / (subf_len + pad); + if (num_subframes > 1) { + u8 *qc = ieee80211_get_qos_ctl((void *)skb->data); + + *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } + + tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - + tcp_hdrlen(skb) + skb->data_len; + + /* + * Make sure we have enough TBs for the A-MSDU: + * 2 for each subframe + * 1 more for each fragment + * 1 more for the potential data in the header + */ + num_subframes = + min_t(unsigned int, num_subframes, + (mvm->trans->max_skb_frags - 1 - + skb_shinfo(skb)->nr_frags) / 2); + + /* This skb fits in one single A-MSDU */ + if (num_subframes * mss >= tcp_payload_len) { + /* + * Compute the length of all the data added for the A-MSDU. 
+ * This will be used to compute the length to write in the TX + * command. We have: SNAP + IP + TCP for n -1 subframes and + * ETH header for n subframes. Note that the original skb + * already had one set of SNAP / IP / TCP headers. + */ + num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); + info = IEEE80211_SKB_CB(skb); + amsdu_add = num_subframes * sizeof(struct ethhdr) + + (num_subframes - 1) * (snap_ip_tcp + pad); + /* This holds the amsdu headers length */ + info->driver_data[0] = (void *)(uintptr_t)amsdu_add; + + __skb_queue_tail(mpdus_skb, skb); + return 0; + } - memcpy(cb, skb_gso->cb, sizeof(cb)); - next = skb_gso_segment(skb_gso, 0); - if (IS_ERR(next)) + /* + * Trick the segmentation function to make it + * create SKBs that can fit into one A-MSDU. + */ +segment: + skb_shinfo(skb)->gso_size = num_subframes * mss; + memcpy(cb, skb->cb, sizeof(cb)); + + next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG); + skb_shinfo(skb)->gso_size = mss; + if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) - consume_skb(skb_gso); + consume_skb(skb); while (next) { tmp = next; next = tmp->next; + memcpy(tmp->cb, cb, sizeof(tmp->cb)); + /* + * Compute the length of all the data added for the A-MSDU. + * This will be used to compute the length to write in the TX + * command. We have: SNAP + IP + TCP for n -1 subframes and + * ETH header for n subframes. + */ + tcp_payload_len = skb_tail_pointer(tmp) - + skb_transport_header(tmp) - + tcp_hdrlen(tmp) + tmp->data_len; + + if (ipv4) + ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes); + + if (tcp_payload_len > mss) { + num_subframes = DIV_ROUND_UP(tcp_payload_len, mss); + info = IEEE80211_SKB_CB(tmp); + amsdu_add = num_subframes * sizeof(struct ethhdr) + + (num_subframes - 1) * (snap_ip_tcp + pad); + info->driver_data[0] = (void *)(uintptr_t)amsdu_add; + skb_shinfo(tmp)->gso_size = mss; + } else { + u8 *qc = ieee80211_get_qos_ctl((void *)tmp->data); + + if (ipv4) + ip_send_check(ip_hdr(tmp)); + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + skb_shinfo(tmp)->gso_size = 0; + } tmp->prev = NULL; tmp->next = NULL; __skb_queue_tail(mpdus_skb, tmp); + i++; } return 0; } +#else /* CONFIG_INET */ +static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta, + struct sk_buff_head *mpdus_skb) +{ + /* Impossible to get TSO with CONFIG_INET */ + WARN_ON(1); + + return -1; +} +#endif /* * Sets the fields in the Tx cmd that are crypto related @@ -560,6 +688,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct sk_buff_head mpdus_skbs; unsigned int payload_len; int ret; @@ -570,6 +699,9 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT)) return -1; + /* This holds the amsdu headers length */ + info->driver_data[0] = (void *)(uintptr_t)0; + if (!skb_is_gso(skb)) return iwl_mvm_tx_mpdu(mvm, skb, sta); @@ -589,7 +721,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, return ret; while (!skb_queue_empty(&mpdus_skbs)) { - struct sk_buff *skb = __skb_dequeue(&mpdus_skbs); + skb = __skb_dequeue(&mpdus_skbs); ret = iwl_mvm_tx_mpdu(mvm, skb, sta); if (ret) { -- cgit v1.2.3 From bb81bb68f472bb0f9afbb483259d91d4efd86bfb Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 26 Oct 2015 16:00:29 +0200 Subject: iwlwifi: mvm: add Tx A-MSDU inside A-MPDU If the peer 
allows, we can have A-MSDU inside A-MDPU. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 4 ++- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 4 ++- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 5 ++- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 37 +++++++++++++++++------ 4 files changed, 38 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index f1ed90bc2740..1a4946fc9b27 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -847,6 +847,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, u16 tid = params->tid; u16 *ssn = ¶ms->ssn; u8 buf_size = params->buf_size; + bool amsdu = params->amsdu; IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", sta->addr, tid, action); @@ -907,7 +908,8 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: - ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size); + ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, + buf_size, amsdu); break; default: WARN_ON_ONCE(1); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4854e79cbda8..b2123ce3e3a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1031,7 +1031,8 @@ release_locks: } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size) + struct ieee80211_sta *sta, u16 tid, u8 buf_size, + bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; @@ -1051,6 +1052,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, tid_data->state = IWL_AGG_ON; mvmsta->agg_tids |= BIT(tid); tid_data->ssn = 0xffff; + tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index e3b9446ee995..b01650ac3598 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -260,6 +260,7 @@ enum iwl_mvm_agg_state { * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). * @reduced_tpc: Reduced tx power. Holds the data between the * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). + * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. 
* @txq_id: Tx queue used by the BA session * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or @@ -274,6 +275,7 @@ struct iwl_mvm_tid_data { /* The rest is Tx AGG related */ u32 rate_n_flags; u8 reduced_tpc; + bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; u16 txq_id; u16 ssn; @@ -405,7 +407,8 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size); + struct ieee80211_sta *sta, u16 tid, u8 buf_size, + bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f67de5a2858..ff08b17b76dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -438,19 +438,26 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_sta *sta, struct sk_buff_head *mpdus_skb) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int mss = skb_shinfo(skb)->gso_size; struct sk_buff *tmp, *next; char cb[sizeof(skb->cb)]; - unsigned int num_subframes, tcp_payload_len, subf_len; + unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len; bool ipv4 = (skb->protocol == htons(ETH_P_IP)); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; u16 amsdu_add, snap_ip_tcp, pad, i = 0; + u8 *qc, tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); + qc = ieee80211_get_qos_ctl(hdr); + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) + return -EINVAL; + if (!sta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control)) { num_subframes = 1; @@ -458,13 +465,28 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, goto segment; } - /* TODO: for now, disable A-MSDU inside AMPDU */ - if (info->flags & IEEE80211_TX_CTL_AMPDU) { + /* + * No need to lock amsdu_in_ampdu_allowed since it can't be modified + * during an BA session. + */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) { num_subframes = 1; pad = 0; goto segment; } + max_amsdu_len = sta->max_amsdu_len; + + /* + * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not + * supported. This is a spec requirement (IEEE 802.11-2015 + * section 8.7.3 NOTE 3). + */ + if (info->flags & IEEE80211_TX_CTL_AMPDU && + !sta->vht_cap.vht_supported) + max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095); + /* Sub frame header + SNAP + IP header + TCP header + MSS */ subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss; pad = (4 - subf_len) & 0x3; @@ -473,12 +495,9 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, * If we have N subframes in the A-MSDU, then the A-MSDU's size is * N * subf_len + (N - 1) * pad. 
*/ - num_subframes = (sta->max_amsdu_len + pad) / (subf_len + pad); - if (num_subframes > 1) { - u8 *qc = ieee80211_get_qos_ctl((void *)skb->data); - + num_subframes = (max_amsdu_len + pad) / (subf_len + pad); + if (num_subframes > 1) *qc |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; - } tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - tcp_hdrlen(skb) + skb->data_len; @@ -555,7 +574,7 @@ segment: info->driver_data[0] = (void *)(uintptr_t)amsdu_add; skb_shinfo(tmp)->gso_size = mss; } else { - u8 *qc = ieee80211_get_qos_ctl((void *)tmp->data); + qc = ieee80211_get_qos_ctl((void *)tmp->data); if (ipv4) ip_send_check(ip_hdr(tmp)); -- cgit v1.2.3 From 9e7dce28659531b2741939e5b6f438bca1422720 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 26 Oct 2015 16:14:06 +0200 Subject: iwlwifi: mvm: allow to limit the A-MSDU from debugfs in order to be able to tune the size of the desired A-MSDU based on link condition, add a knob to modify the length of the A-MSDU. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 19 +++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 5 +++++ 3 files changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 54d0d7d2d0b1..56e6b0b8b9cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -64,6 +64,7 @@ * *****************************************************************************/ #include +#include #include "mvm.h" #include "fw-dbg.h" @@ -1080,6 +1081,22 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, return count; } +static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm, + char *buf, size_t count, + loff_t *ppos) +{ + unsigned int max_amsdu_len; + int ret; + + ret = kstrtouint(buf, 0, &max_amsdu_len); + + if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454) + return -EINVAL; + mvm->max_amsdu_len = max_amsdu_len; + + return count; +} + #define ADD_TEXT(...) 
pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__) #ifdef CONFIG_IWLWIFI_BCAST_FILTERING static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file, @@ -1497,6 +1514,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8); +MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8); MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, (IWL_RSS_INDIRECTION_TABLE_SIZE * 2)); @@ -1541,6 +1559,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, S_IRUSR | S_IWUSR); MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR); + MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index bb811a29490d..416aedb7c19e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -861,6 +861,7 @@ struct iwl_mvm { /* Indicate if device power save is allowed */ u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */ + unsigned int max_amsdu_len; /* used for debugfs only */ struct ieee80211_vif __rcu *csa_vif; struct ieee80211_vif __rcu *csa_tx_blocked_vif; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ff08b17b76dd..a5a55da142eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -448,6 +448,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, bool ipv4 = (skb->protocol == htons(ETH_P_IP)); u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; u16 amsdu_add, snap_ip_tcp, pad, i = 0; + unsigned int dbg_max_amsdu_len; u8 *qc, tid; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + @@ -477,6 +478,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, } max_amsdu_len = sta->max_amsdu_len; + dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); + if (dbg_max_amsdu_len) + max_amsdu_len = min_t(unsigned int, max_amsdu_len, + dbg_max_amsdu_len); /* * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not -- cgit v1.2.3 From 04e3a5da6b73d7f01661288ead426986fa5312a1 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 28 Oct 2015 09:47:41 +0200 Subject: iwlwifi: mvm: don't enable A-MSDU when the rates are too low Allow A-MSDU only when we are not downscaling and the initial MCS is at least 5. 
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 24 ++++++++++++++++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 3 ++- 3 files changed, 26 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 3dc94d2e8a65..61d0a8cd13f9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -1673,6 +1673,20 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search) } } +static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta, + struct iwl_scale_tbl_info *tbl, + enum rs_action scale_action) +{ + struct iwl_mvm_sta *sta_priv = iwl_mvm_sta_from_mac80211(sta); + + if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) || + tbl->rate.index < IWL_RATE_MCS_5_INDEX || + scale_action == RS_ACTION_DOWNSCALE) + sta_priv->tlc_amsdu = false; + else + sta_priv->tlc_amsdu = true; +} + /* * setup rate table in uCode */ @@ -2416,6 +2430,7 @@ lq_update: tbl->rate.index = index; if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK) rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action); + rs_set_amsdu_len(mvm, sta, tbl, scale_action); rs_update_rate_tbl(mvm, sta, lq_sta, tbl); } @@ -3099,6 +3114,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, sband = hw->wiphy->bands[band]; lq_sta->lq.sta_id = sta_priv->sta_id; + sta_priv->tlc_amsdu = false; for (j = 0; j < LQ_SIZE; j++) rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]); @@ -3658,10 +3674,13 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, ssize_t ret; struct iwl_lq_sta *lq_sta = file->private_data; + struct iwl_mvm_sta *mvmsta = + container_of(lq_sta, struct iwl_mvm_sta, lq_sta); struct iwl_mvm *mvm; struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); struct rs_rate *rate = &tbl->rate; u32 ss_params; + mvm = lq_sta->pers.drv; buff = kmalloc(2048, GFP_KERNEL); if (!buff) @@ -3687,10 +3706,11 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file, (is_ht20(rate)) ? "20MHz" : (is_ht40(rate)) ? "40MHz" : (is_ht80(rate)) ? "80Mhz" : "BAD BW"); - desc += sprintf(buff + desc, " %s %s %s\n", + desc += sprintf(buff + desc, " %s %s %s %s\n", (rate->sgi) ? "SGI" : "NGI", (rate->ldpc) ? "LDPC" : "BCC", - (lq_sta->is_agg) ? "AGG on" : ""); + (lq_sta->is_agg) ? "AGG on" : "", + (mvmsta->tlc_amsdu) ? "AMSDU on" : ""); } desc += sprintf(buff+desc, "last tx rate=0x%X\n", lq_sta->last_rate_n_flags); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index b01650ac3598..f95f603ad56c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -313,6 +313,7 @@ struct iwl_mvm_key_pn { * @tx_protection: reference counter for controlling the Tx protection. * @tt_tx_protection: is thermal throttling enable Tx protection? * @disable_tx: is tx to this STA disabled? + * @tlc_amsdu: true if A-MSDU is allowed * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON) * @sleep_tx_count: the number of frames that we told the firmware to let out * even when that station is asleep. 
This is useful in case the queue @@ -347,6 +348,7 @@ struct iwl_mvm_sta { bool tt_tx_protection; bool disable_tx; + bool tlc_amsdu; u8 agg_tids; u8 sleep_tx_count; }; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a5a55da142eb..a73ae01f3a48 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -460,7 +460,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, return -EINVAL; if (!sta->max_amsdu_len || - !ieee80211_is_data_qos(hdr->frame_control)) { + !ieee80211_is_data_qos(hdr->frame_control) || + !mvmsta->tlc_amsdu) { num_subframes = 1; pad = 0; goto segment; -- cgit v1.2.3 From 50b0213fdb830e21ed64375230c3271b21bb584a Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Wed, 11 Nov 2015 11:37:02 +0200 Subject: iwlwifi: mvm: don't send an A-MSDU that is larger than the TXF The A-MSDU must be smaller than the Transmit FIFO in the device. Since the size of the TXF can change depending on the device / firmware compilation mode, take the size of the FIFO dynamically from the what the firmware tells us. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a73ae01f3a48..dd616c1d59b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -449,7 +449,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0; u16 amsdu_add, snap_ip_tcp, pad, i = 0; unsigned int dbg_max_amsdu_len; - u8 *qc, tid; + u8 *qc, tid, txf; snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); @@ -480,6 +480,19 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, max_amsdu_len = sta->max_amsdu_len; dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); + + /* the Tx FIFO to which this A-MSDU will be routed */ + txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + + /* + * Don't send an AMSDU that will be longer than the TXF. + * Add a security margin of 256 for the TX command + headers. + * We also want to have the start of the next packet inside the + * fifo to be able to send bursts. + */ + max_amsdu_len = min_t(unsigned int, max_amsdu_len, + mvm->shared_mem_cfg.txfifo_size[txf] - 256); + if (dbg_max_amsdu_len) max_amsdu_len = min_t(unsigned int, max_amsdu_len, dbg_max_amsdu_len); -- cgit v1.2.3 From e0d8fdecf3acf36eb738e42d3caf46371500881c Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 28 Dec 2015 22:37:08 +0200 Subject: iwlwifi: support tracing wide commands Current iwlwifi_trace_dev_rx prints only the cmd without the group, which might be misleading. Change it to print the wide id. While at it add the DATA_PATH group and sub commands to the trace of the command names, sine it is missing due to patches submitted in parallel. 
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h | 4 ++-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 22786d7dc00a..2a0703fcec56 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -73,12 +73,12 @@ TRACE_EVENT(iwlwifi_dev_rx, TP_ARGS(dev, trans, pkt, len), TP_STRUCT__entry( DEV_ENTRY - __field(u8, cmd) + __field(u16, cmd) __dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len)) ), TP_fast_assign( DEV_ASSIGN; - __entry->cmd = pkt->hdr.cmd; + __entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); memcpy(__get_dynamic_array(rxbuf), pkt, iwl_rx_trace_len(trans, pkt, len)); ), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index c5b6e8671169..bfa6da1bf846 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -395,6 +395,13 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { + HCMD_NAME(UPDATE_MU_GROUPS_CMD), +}; + /* Please keep this array *SORTED* by hex value. * Access is done through binary search */ @@ -406,6 +413,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names), [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names), + [DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names), [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), }; -- cgit v1.2.3 From 77fe739554e13d44466e115dbaba3e7aa3aececd Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 10 Jan 2016 14:23:25 +0200 Subject: iwlwifi: mvm: update rx_status with mactime flag When forming IBSS, mac80211 scans in order to find an already existing cell to join. In case the scan does not find any existing cell a new IBSS cell is formed. When receiving the beacons of another IBSS cell we should merge if the other IBSS cell's TSF is higher than ours. However, currently iwlmvm does not set any timestamp flag in rx_status so there is no valid rx timestamp to compare the beacon's TSF to. The reason for that is that TSF as indicated by the firmware is at INA time, but up till now mac80211 expected the TSF at the beginning or end of the MPDU. Set the flag to the newly added RX_FLAG_MACTIME_PLCP_START flag. 
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 8 +++----- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 2 ++ 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 535134d639e0..e885db3464b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -1484,6 +1484,8 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, /* update rx_status according to the notification's metadata */ memset(&rx_status, 0, sizeof(rx_status)); rx_status.mactime = le64_to_cpu(sb->tsf); + /* TSF as indicated by the firmware is at INA time */ + rx_status.flag |= RX_FLAG_MACTIME_PLCP_START; rx_status.device_timestamp = le32_to_cpu(sb->system_time); rx_status.band = (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ? diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 055a8b0f7ad7..ad625f0c79ed 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -322,11 +322,9 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->freq = ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel), rx_status->band); - /* - * TSF as indicated by the fw is at INA time, but mac80211 expects the - * TSF at the beginning of the MPDU. - */ - /*rx_status->flag |= RX_FLAG_MACTIME_MPDU;*/ + + /* TSF as indicated by the firmware is at INA time */ + rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; iwl_mvm_get_signal_strength(mvm, phy_info, rx_status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 615dea143d4e..a9180b06fb52 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -332,6 +332,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->freq = ieee80211_channel_to_frequency(desc->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, desc, rx_status); + /* TSF as indicated by the firmware is at INA time */ + rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; rcu_read_lock(); -- cgit v1.2.3 From 3af512d6aac7eb6420086f124abb4426f5f4b369 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 22 Jul 2015 11:38:40 +0300 Subject: iwlwifi: mvm: support filtered frames notification During d0i3 frames might be filtered by the FW and this may cause reordering buffer a delay - as the frames will not be received and reorder will time out. Introduce an API function to receive notification of filtered frames and pass the information to the mac80211. 
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 22 +++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 4 ++ drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 49 +++++++++++++++++++++++++ 4 files changed, 77 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index ca7fec71854f..e1e11946f7e9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -119,6 +119,8 @@ enum { SCAN_ABORT_UMAC = 0xe, SCAN_COMPLETE_UMAC = 0xf, + BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13, + /* station table */ ADD_STA_KEY = 0x17, ADD_STA = 0x18, @@ -1286,6 +1288,26 @@ struct iwl_fw_bcast_filter { struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS]; } __packed; /* BCAST_FILTER_S_VER_1 */ +#define BA_WINDOW_STREAMS_MAX 16 +#define BA_WINDOW_STATUS_TID_MSK 0x000F +#define BA_WINDOW_STATUS_STA_ID_POS 4 +#define BA_WINDOW_STATUS_STA_ID_MSK 0x01F0 +#define BA_WINDOW_STATUS_VALID_MSK BIT(9) + +/** + * struct iwl_ba_window_status_notif - reordering window's status notification + * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63] + * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid + * @start_seq_num: the start sequence number of the bitmap + * @mpdu_rx_count: the number of received MPDUs since entering D0i3 + */ +struct iwl_ba_window_status_notif { + __le64 bitmap[BA_WINDOW_STREAMS_MAX]; + __le16 ra_tid[BA_WINDOW_STREAMS_MAX]; + __le32 start_seq_num[BA_WINDOW_STREAMS_MAX]; + __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; +} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ + /** * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration. * @default_discard: default action for this mac (discard (1) / pass (0)). 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 416aedb7c19e..fa987bd9da0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1268,6 +1268,8 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, struct ieee80211_vif *vif); unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index bfa6da1bf846..52c73d0c1be5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -236,6 +236,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, iwl_mvm_rx_ant_coupling_notif, true), + RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, + iwl_mvm_window_status_notif, false), + RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), @@ -294,6 +297,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(SCAN_COMPLETE_UMAC), HCMD_NAME(TOF_CMD), HCMD_NAME(TOF_NOTIFICATION), + HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID), HCMD_NAME(ADD_STA_KEY), HCMD_NAME(ADD_STA), HCMD_NAME(REMOVE_STA), diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index ad625f0c79ed..485cfc1a4daa 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -626,3 +627,51 @@ void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb)); } + +void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_ba_window_status_notif *notif = (void *)pkt->data; + int i; + u32 pkt_len = iwl_rx_packet_payload_len(pkt); + + if (WARN_ONCE(pkt_len != sizeof(*notif), + "Received window status notification of wrong size (%u)\n", + pkt_len)) + return; + + rcu_read_lock(); + for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) { + struct ieee80211_sta *sta; + u8 sta_id, tid; + u64 bitmap; + u32 ssn; + u16 ratid; + u16 received_mpdu; + + ratid = le16_to_cpu(notif->ra_tid[i]); + /* check that this TID is valid */ + if (!(ratid & BA_WINDOW_STATUS_VALID_MSK)) + continue; + + received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]); + if (received_mpdu == 0) + continue; + + tid = ratid & BA_WINDOW_STATUS_TID_MSK; + /* get the station */ + sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK) + >> BA_WINDOW_STATUS_STA_ID_POS; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) + continue; + bitmap = le64_to_cpu(notif->bitmap[i]); + ssn = le32_to_cpu(notif->start_seq_num[i]); + + /* update mac80211 with the bitmap for the reordering buffer */ + ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap, + received_mpdu); + } + rcu_read_unlock(); +} -- cgit v1.2.3 From 88076015f80f7645b1eaada6291f4a23ab96bd8f Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 15 Feb 2016 17:26:48 +0200 Subject: iwlwifi: pcie: configure more RFH settings Fine tune RFH registers further: * Set default queue explicitly * Set RFH to drop frames exceeding RB size * Set the maximum rx transfer size to DRAM to 128 instead of 64 Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 24 ++++++++++++++---------- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 12 +++++++++++- 2 files changed, 25 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 4ab6682ea53e..8af818b10e71 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -6,7 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -32,7 +32,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -368,20 +368,24 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS) #define RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS) -#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ +#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */ #define RFH_RXF_DMA_MIN_RB_SIZE_POS 24 -#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) -#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ -#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ -#define RFH_DMA_EN_ENABLE_VAL BIT(31) +#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS) +#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */ +#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */ +#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/ +#define RFH_DMA_EN_ENABLE_VAL BIT(31) #define RFH_RXF_RXQ_ACTIVE 0xA0980C #define RFH_GEN_CFG 0xA09800 +#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) +#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) +#define RFH_GEN_CFG_RB_CHUNK_SIZE BIT(4) /* 0 - 64B, 1- 128B */ #define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00 -#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0) -#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1) -#define DEFAULT_RXQ_NUM 8 +#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS 8 + +#define DEFAULT_RXQ_NUM 0 /* end of 9000 rx series registers */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index c0db681d66d1..398dd9332345 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -783,16 +783,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) * Single frame mode * Rx buffer size 4 or 8k or 12k * Min RB size 4 or 8 + * Drop frames that exceed RB size * 512 RBDs */ iwl_write_prph(trans, RFH_RXF_DMA_CFG, RFH_DMA_EN_ENABLE_VAL | rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK | RFH_RXF_DMA_MIN_RB_4_8 | + RFH_RXF_DMA_DROP_TOO_LARGE_MASK | RFH_RXF_DMA_RBDCB_SIZE_512); + /* + * Activate DMA snooping. + * Set RX DMA chunk size to 128 bit + * Default queue is 0 + */ iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | - RFH_GEN_CFG_SERVICE_DMA_SNOOP); + RFH_GEN_CFG_RB_CHUNK_SIZE | + (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | + RFH_GEN_CFG_SERVICE_DMA_SNOOP); + /* Enable the relevant rx queues */ iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled); /* Set interrupt coalescing timer to default (2048 usecs) */ -- cgit v1.2.3 From 863eac305a7ba75e7c394b2a24cdee2a636b7dc4 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 1 Dec 2015 20:39:57 +0200 Subject: iwlwifi: pcie: add pm_prepare and pm_complete ops With these ops, we can know when we are about to enter system suspend. This allows us to exit D0i3 state before entering suspend. 
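For context on the ordering this relies on: .prepare runs before the suspend callbacks of every device and .complete runs after resume (and also when the suspend attempt fails), so a flag set in .prepare and cleared in .complete brackets the whole suspend window. Below is a minimal stand-alone C sketch of that bracketing; the names are illustrative only, not taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the transport state touched by the driver. */
struct trans_state {
        bool suspending;        /* set across the whole system-suspend window */
        bool wakelock_held;
};

/* .prepare: runs first; forbid taking the wakelock from now on. */
static void pm_prepare(struct trans_state *t)
{
        t->suspending = true;
}

/* Runtime-resume path: must not grab the wakelock once suspending is set. */
static void runtime_resume(struct trans_state *t)
{
        if (!t->suspending)
                t->wakelock_held = true;
}

/* .complete: runs after resume (or a failed suspend); re-allow the wakelock. */
static void pm_complete(struct trans_state *t)
{
        t->suspending = false;
}

int main(void)
{
        struct trans_state t = { 0 };

        pm_prepare(&t);       /* system suspend is about to start          */
        runtime_resume(&t);   /* device woken from D0i3 before the suspend */
        printf("wakelock during suspend window: %d\n", t.wakelock_held); /* 0 */
        pm_complete(&t);      /* suspend finished (or failed)              */
        runtime_resume(&t);
        printf("wakelock after complete: %d\n", t.wakelock_held);        /* 1 */
        return 0;
}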
Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 1 + drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 41 ++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 0ca0f13b69b0..91d74b3f666b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -836,6 +836,7 @@ struct iwl_trans { enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode runtime_pm_mode; + bool suspending; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 753ec6785912..d33b6baf5f98 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -811,6 +811,45 @@ static int iwl_pci_runtime_resume(struct device *device) return 0; } + +static int iwl_pci_system_prepare(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + + IWL_DEBUG_RPM(trans, "preparing for system suspend\n"); + + /* This is called before entering system suspend and before + * the runtime resume is called. Set the suspending flag to + * prevent the wakelock from being taken. + */ + trans->suspending = true; + + /* Wake the device up from runtime suspend before going to + * platform suspend. This is needed because we don't know + * whether wowlan any is set and, if it's not, mac80211 will + * disconnect (in which case, we can't be in D0i3). + */ + pm_runtime_resume(device); + + return 0; +} + +static void iwl_pci_system_complete(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct iwl_trans *trans = pci_get_drvdata(pdev); + + IWL_DEBUG_RPM(trans, "completing system suspend\n"); + + /* This is called as a counterpart to the prepare op. It is + * called either when suspending fails or when suspend + * completed successfully. Now there's no risk of grabbing + * the wakelock anymore, so we can release the suspending + * flag. + */ + trans->suspending = false; +} #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ static const struct dev_pm_ops iwl_dev_pm_ops = { @@ -820,6 +859,8 @@ static const struct dev_pm_ops iwl_dev_pm_ops = { SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend, iwl_pci_runtime_resume, NULL) + .prepare = iwl_pci_system_prepare, + .complete = iwl_pci_system_complete, #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ }; -- cgit v1.2.3 From 685b346c3af35bcdc1316289ce8c76e28a5a7a7e Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 23 Feb 2016 11:34:17 +0200 Subject: iwlwifi: pcie: prevent skbs shadowing in iwl_trans_pcie_reclaim The patch below introduced a variable shadowing. Fix that. 
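To illustrate the bug class being fixed (this is not the driver code), a small stand-alone example of how re-declaring a name in an inner scope silently shadows the outer variable, which is exactly what the rename below avoids:

#include <stdio.h>

int main(void)
{
        int skbs = 5;                   /* outer variable */

        if (skbs > 0) {
                int skbs = 0;           /* shadows the outer 'skbs' */

                skbs += 3;
                printf("inner skbs = %d\n", skbs);      /* 3 */
        }
        printf("outer skbs = %d\n", skbs);              /* still 5 */
        return 0;
}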
Fixes: 3955525d5d17 ("iwlwifi: pcie: buffer packets to avoid overflowing Tx queues") Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 837a7d536874..16ad820ca824 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1062,10 +1062,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, if (iwl_queue_space(&txq->q) > txq->q.low_mark && test_bit(txq_id, trans_pcie->queue_stopped)) { - struct sk_buff_head skbs; + struct sk_buff_head overflow_skbs; - __skb_queue_head_init(&skbs); - skb_queue_splice_init(&txq->overflow_q, &skbs); + __skb_queue_head_init(&overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, &overflow_skbs); /* * This is tricky: we are in reclaim path which is non @@ -1076,8 +1076,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, */ spin_unlock_bh(&txq->lock); - while (!skb_queue_empty(&skbs)) { - struct sk_buff *skb = __skb_dequeue(&skbs); + while (!skb_queue_empty(&overflow_skbs)) { + struct sk_buff *skb = __skb_dequeue(&overflow_skbs); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1; struct iwl_device_cmd *dev_cmd = -- cgit v1.2.3 From a571f5f635ef40ff3a5e85acc7f20daa58bcf06a Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 7 Dec 2015 12:50:58 +0200 Subject: iwlwifi: mvm: add duplicate packet detection per rx queue Next hardware will direct TCP/UDP streams to different cores. Packets belonging to the same stream will be directed to the same core. The result is that duplicates will be always directed to the same rx queue were the first packet was received. This enabled parallelizing the duplicate packet detection across the different cores, without sharing data between the rx queues. 
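A condensed user-space sketch of the per-queue duplicate check this patch introduces; field names and sizes are simplified, and the real code in rxmq.c below additionally exempts control, QoS-null and multicast frames as well as frames belonging to a BA session:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define NUM_QUEUES      4
#define NUM_TIDS        9       /* 8 QoS TIDs + 1 slot for non-QoS data */

/* Per-RX-queue duplicate-detection state, one instance per station. */
struct dup_data {
        uint16_t last_seq[NUM_TIDS];
        uint8_t  last_sub_frame[NUM_TIDS];
};

static struct dup_data dup[NUM_QUEUES];

/*
 * Returns true if the frame is a retransmission already seen on this queue.
 * Because the hardware steers every frame of a stream to the same queue,
 * no locking against the other queues is needed.
 */
static bool is_dup(int queue, int tid, uint16_t seq_ctrl,
                   uint8_t sub_frame_idx, bool retry)
{
        struct dup_data *d = &dup[queue];

        if (retry &&
            d->last_seq[tid] == seq_ctrl &&
            d->last_sub_frame[tid] >= sub_frame_idx)
                return true;

        d->last_seq[tid] = seq_ctrl;
        d->last_sub_frame[tid] = sub_frame_idx;
        return false;
}

int main(void)
{
        memset(dup, 0, sizeof(dup));
        /* first transmission, then a retry of the same MPDU on queue 2 */
        bool first = is_dup(2, 0, 0x1230, 0, false);
        bool again = is_dup(2, 0, 0x1230, 0, true);

        return (first == false && again == true) ? 0 : 1;
}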
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 66 +++++++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 14 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 13 +++++- 3 files changed, 92 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a9180b06fb52..d89194223af3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -285,6 +285,66 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta, skb->ip_summed = CHECKSUM_UNNECESSARY; } +/* + * returns true if a packet outside BA session is a duplicate and + * should be dropped + */ +static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, + struct ieee80211_rx_status *rx_status, + struct ieee80211_hdr *hdr, + struct iwl_rx_mpdu_desc *desc) +{ + struct iwl_mvm_sta *mvm_sta; + struct iwl_mvm_rxq_dup_data *dup_data; + u8 baid, tid, sub_frame_idx; + + if (WARN_ON(IS_ERR_OR_NULL(sta))) + return false; + + baid = (le32_to_cpu(desc->reorder_data) & + IWL_RX_MPDU_REORDER_BAID_MASK) >> + IWL_RX_MPDU_REORDER_BAID_SHIFT; + + if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) + return false; + + mvm_sta = iwl_mvm_sta_from_mac80211(sta); + dup_data = &mvm_sta->dup_data[queue]; + + /* + * Drop duplicate 802.11 retransmissions + * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") + */ + if (ieee80211_is_ctl(hdr->frame_control) || + ieee80211_is_qos_nullfunc(hdr->frame_control) || + is_multicast_ether_addr(hdr->addr1)) { + rx_status->flag |= RX_FLAG_DUP_VALIDATED; + return false; + } + + if (ieee80211_is_data_qos(hdr->frame_control)) + /* frame has qos control */ + tid = *ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_TID_MASK; + else + tid = IWL_MAX_TID_COUNT; + + /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ + sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + + if (unlikely(ieee80211_has_retry(hdr->frame_control) && + dup_data->last_seq[tid] == hdr->seq_ctrl && + dup_data->last_sub_frame[tid] >= sub_frame_idx)) + return true; + + dup_data->last_seq[tid] = hdr->seq_ctrl; + dup_data->last_sub_frame[tid] = sub_frame_idx; + + rx_status->flag |= RX_FLAG_DUP_VALIDATED; + + return false; +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { @@ -389,6 +449,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (ieee80211_is_data(hdr->frame_control)) iwl_mvm_rx_csum(sta, skb, desc); + + if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { + kfree_skb(skb); + rcu_read_unlock(); + return; + } } /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index b2123ce3e3a8..4717b185f5b0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -280,6 +280,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_rxq_dup_data *dup_data; int i, ret, sta_id; lockdep_assert_held(&mvm->mutex); @@ -327,6 +328,16 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, } mvm_sta->agg_tids = 0; + if (iwl_mvm_has_new_rx_api(mvm) && + !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { + dup_data = 
kcalloc(mvm->trans->num_rx_queues, + sizeof(*dup_data), + GFP_KERNEL); + if (!dup_data) + return -ENOMEM; + mvm_sta->dup_data = dup_data; + } + ret = iwl_mvm_sta_send_to_fw(mvm, sta, false); if (ret) goto err; @@ -508,6 +519,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_has_new_rx_api(mvm)) + kfree(mvm_sta->dup_data); + if (vif->type == NL80211_IFTYPE_STATION && mvmvif->ap_sta_id == mvm_sta->sta_id) { ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index f95f603ad56c..db701cad87c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -295,6 +295,16 @@ struct iwl_mvm_key_pn { } ____cacheline_aligned_in_smp q[]; }; +/** + * struct iwl_mvm_rxq_dup_data - per station per rx queue data + * @last_seq: last sequence per tid for duplicate packet detection + * @last_sub_frame: last subframe packet + */ +struct iwl_mvm_rxq_dup_data { + __le16 last_seq[IWL_MAX_TID_COUNT + 1]; + u8 last_sub_frame[IWL_MAX_TID_COUNT + 1]; +} ____cacheline_aligned_in_smp; + /** * struct iwl_mvm_sta - representation of a station in the driver * @sta_id: the index of the station in the fw (will be replaced by id_n_color) @@ -321,6 +331,7 @@ struct iwl_mvm_key_pn { * we are sending frames from an AMPDU queue and there was a hole in * the BA window. To be used for UAPSD only. * @ptk_pn: per-queue PTK PN data structures + * @dup_data: per queue duplicate packet detection data * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -340,8 +351,8 @@ struct iwl_mvm_sta { struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; - struct iwl_mvm_key_pn __rcu *ptk_pn[4]; + struct iwl_mvm_rxq_dup_data *dup_data; /* Temporary, until the new TLC will control the Tx protection */ s8 tx_protection; -- cgit v1.2.3 From 94bb44813ebe07ff06f869c7612c0c73e04ac4b0 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 16 Dec 2015 18:48:28 +0200 Subject: iwlwifi: mvm: add RSS queues notification infrastructure In multi rx queue HW, without execessive locking, there is no sync between the ctrl path (default queue) and the rest of the rx queues. This might cause issues on certain situations. For example, in case a delBA was processed on a default queue but out of order packets still wait for processing on the other queue. The solution is to introduce internal messaging between the CTRL path and the other rx queues. The driver will send a message to the firmware, which will echo it to all the requested queues. The message will be in order inside the queue. This way we can avoid CTRL path and RSS queues races. Add support for this messaging mechanism. As the firmware is agnostic to the data sent, add internal representation of the data as well. Although currently only delBA flow will use it, the internal representation will enable generic use of this infrastructure for future uses. Next patch will utilize this messaging mechanism for the reorder buffer delBA flow. 
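A minimal sketch of the "internal notification wrapped in a DWORD-aligned payload" layout described above, with simplified structure names, no endian conversion and plain malloc instead of the firmware command API; it only illustrates the packing, not the actual interface:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Payload the firmware echoes verbatim to the requested RX queues. */
struct rxq_sync_cmd {
        uint32_t flags;         /* bits 0:3 carry the sending queue    */
        uint32_t rxq_mask;      /* bitmap of queues to notify          */
        uint32_t count;         /* payload size, must be DWORD aligned */
        uint8_t  payload[];
};

/* Driver-internal message carried opaquely inside the payload. */
struct internal_rxq_notif {
        uint32_t type;          /* e.g. 0 == "delBA", per the enum above */
        uint8_t  data[];
};

/* Build a sync command carrying one internal notification. */
static struct rxq_sync_cmd *build_sync_cmd(uint32_t rxq_mask,
                                           const void *msg, uint32_t count)
{
        struct rxq_sync_cmd *cmd;

        if (count & 3)                  /* firmware requires DWORD alignment */
                return NULL;

        cmd = calloc(1, sizeof(*cmd) + count);
        if (!cmd)
                return NULL;

        cmd->rxq_mask = rxq_mask;
        cmd->count = count;
        memcpy(cmd->payload, msg, count);
        return cmd;
}

int main(void)
{
        struct internal_rxq_notif notif = { .type = 0 /* delBA */ };
        struct rxq_sync_cmd *cmd = build_sync_cmd(0xf, &notif, sizeof(notif));

        free(cmd);
        return 0;
}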
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 52 ++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 2 + drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 4 ++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 8 ++++ drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 48 ++++++++++++++++++++ 5 files changed, 114 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index df939f51d9b9..eb9b87038e1f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -391,4 +391,56 @@ struct iwl_rss_config_cmd { u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE]; } __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */ +#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128 +#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0 +#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf + +/** + * struct iwl_rxq_sync_cmd - RXQ notification trigger + * + * @flags: flags of the notification. bit 0:3 are the sender queue + * @rxq_mask: rx queues to send the notification on + * @count: number of bytes in payload, should be DWORD aligned + * @payload: data to send to rx queues + */ +struct iwl_rxq_sync_cmd { + __le32 flags; + __le32 rxq_mask; + __le32 count; + u8 payload[]; +} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ + +/** + * struct iwl_rxq_sync_notification - Notification triggered by RXQ + * sync command + * + * @count: number of bytes in payload + * @payload: data to send to rx queues + */ +struct iwl_rxq_sync_notification { + __le32 count; + u8 payload[]; +} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */ + +/** +* Internal message identifier +* +* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA +*/ +enum iwl_mvm_rxq_notif_type { + IWL_MVM_RXQ_NOTIF_DEL_BA, +}; + +/** +* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent +* in &iwl_rxq_sync_cmd. Should be DWORD aligned. 
+* +* @type: value from &iwl_mvm_rxq_notif_type +* @data: payload +*/ +struct iwl_mvm_internal_rxq_notif { + u32 type; + u8 data[]; +} __packed; + #endif /* __fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index e1e11946f7e9..f432ddd4cde3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -289,6 +289,8 @@ enum iwl_phy_ops_subcmd_ids { enum iwl_data_path_subcmd_ids { UPDATE_MU_GROUPS_CMD = 0x1, + TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, + RX_QUEUES_NOTIFICATION = 0xFF, }; enum iwl_prot_offload_subcmd_ids { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index fa987bd9da0d..f05d2a1f4467 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1225,6 +1225,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, int queue); +int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, + const u8 *data, u32 count); +void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + int queue); void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 52c73d0c1be5..ac271ffe477b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -404,6 +404,8 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(UPDATE_MU_GROUPS_CMD), + HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(RX_QUEUES_NOTIFICATION), }; /* Please keep this array *SORTED* by hex value. 
@@ -876,6 +878,9 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0); else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD) iwl_mvm_rx_phy_cmd_mq(mvm, rxb); + else if (unlikely(pkt->hdr.group_id == DATA_PATH_GROUP && + pkt->hdr.cmd == RX_QUEUES_NOTIFICATION)) + iwl_mvm_rx_queue_notif(mvm, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } @@ -1548,6 +1553,9 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode, if (unlikely(pkt->hdr.cmd == FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, rxb, queue); + else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION && + pkt->hdr.group_id == DATA_PATH_GROUP)) + iwl_mvm_rx_queue_notif(mvm, rxb, queue); else iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index d89194223af3..590fc6faff04 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -345,6 +345,54 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, return false; } +int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, + const u8 *data, u32 count) +{ + struct iwl_rxq_sync_cmd *cmd; + u32 data_size = sizeof(*cmd) + count; + int ret; + + /* should be DWORD aligned */ + if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE)) + return -EINVAL; + + cmd = kzalloc(data_size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->rxq_mask = cpu_to_le32(rxq_mask); + cmd->count = cpu_to_le32(count); + cmd->flags = 0; + memcpy(cmd->payload, data, count); + + ret = iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + TRIGGER_RX_QUEUES_NOTIF_CMD), + 0, data_size, cmd); + + kfree(cmd); + return ret; +} + +void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, + int queue) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rxq_sync_notification *notif; + struct iwl_mvm_internal_rxq_notif *internal_notif; + + notif = (void *)pkt->data; + internal_notif = (void *)notif->payload; + + switch (internal_notif->type) { + case IWL_MVM_RXQ_NOTIF_DEL_BA: + /* TODO */ + break; + default: + WARN_ONCE(1, "Invalid identifier %d", internal_notif->type); + } +} + void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { -- cgit v1.2.3 From 7ed3eec53023deecffb61abbcb7f8408ff6ab064 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 25 Feb 2016 10:12:36 +0200 Subject: iwlwifi: mvm: remove unused field in iwl_mvm_tid_data Commit 69c7fda40921c125eeeef6a827f6270ac6aa1baa removed the users of iwl_mvm_tid_data.reduced_tpc. Due to a conflict, I forgot to commit the hunk that removed the field itself. Do this know. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index db701cad87c1..1a8f69a41405 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -258,8 +258,6 @@ enum iwl_mvm_agg_state { * This is basically (last acked packet++). * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). - * @reduced_tpc: Reduced tx power. Holds the data between the - * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). 
* @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. * @state: state of the BA agreement establishment / tear down. * @txq_id: Tx queue used by the BA session @@ -274,7 +272,6 @@ struct iwl_mvm_tid_data { u16 next_reclaimed; /* The rest is Tx AGG related */ u32 rate_n_flags; - u8 reduced_tpc; bool amsdu_in_ampdu_allowed; enum iwl_mvm_agg_state state; u16 txq_id; -- cgit v1.2.3 From f92659a110436cdfdd8d7661188ba059b61404b7 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 3 Feb 2016 15:04:49 +0200 Subject: iwlwifi: mvm: support VHT MU-MIMO notification When the device is in d0i3/d3 we will not receive the VHT MU-MIMO group id management frame. Instead, firmware will notify us upon exit on the current status and we can in turn update mac80211. Support this notification. While at it, also check as a precaution that the vif is indeed the VHT MU-MIMO owner before updating the firmware. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 13 +++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 33 +++++++++++++++++++++-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2 ++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 3 +++ 4 files changed, 49 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h index f432ddd4cde3..4a0fc47c81f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h @@ -290,6 +290,7 @@ enum iwl_phy_ops_subcmd_ids { enum iwl_data_path_subcmd_ids { UPDATE_MU_GROUPS_CMD = 0x1, TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, + MU_GROUP_MGMT_NOTIF = 0xFE, RX_QUEUES_NOTIFICATION = 0xFF, }; @@ -1965,6 +1966,18 @@ struct iwl_mu_group_mgmt_cmd { __le32 user_position[4]; } __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */ +/** + * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification + * + * @membership_status: a bitmap of MU groups + * @user_position: the position of station in a group. If the station is in the + * group then bits (group * 2) is the position -1 + */ +struct iwl_mu_group_mgmt_notif { + __le32 membership_status[2]; + __le32 user_position[4]; +} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */ + #define MAX_STORED_BEACON_SIZE 600 /** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 1a4946fc9b27..b7551610f37e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -1780,6 +1781,34 @@ static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 0, sizeof(cmd), &cmd); } +static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + if (vif->mu_mimo_owner) { + struct iwl_mu_group_mgmt_notif *notif = _data; + + /* + * MU-MIMO Group Id action frame is little endian. We treat + * the data received from firmware as if it came from the + * action frame, so no conversion is needed. 
+ */ + ieee80211_update_mu_groups(vif, + (u8 *)¬if->membership_status, + (u8 *)¬if->user_position); + } +} + +void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb) +{ + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; + + ieee80211_iterate_active_interfaces_atomic( + mvm->hw, IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_mu_mimo_iface_iterator, notif); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1890,10 +1919,10 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, /* * The firmware tracks the MU-MIMO group on its own. - * However, on HW restart we should restore this data + * However, on HW restart we should restore this data. */ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && - changes & BSS_CHANGED_MU_GROUPS) { + (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { ret = iwl_mvm_update_mu_groups(mvm, vif); if (ret) IWL_ERR(mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index f05d2a1f4467..21986f63682e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1272,6 +1272,8 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); +void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, + struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_window_status_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ac271ffe477b..8bdaa7b27ab1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -275,6 +275,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, iwl_mvm_rx_stored_beacon_notif, false), + RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, + iwl_mvm_mu_mimo_grp_notif, false), }; #undef RX_HANDLER @@ -405,6 +407,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = { static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), }; -- cgit v1.2.3 From f1704618987b2710760582c87b5f7f600e529a60 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Thu, 25 Feb 2016 10:19:59 +0200 Subject: iwlwifi: mvm: various trivial cleanups * Remove uneeded includes: iwl-csr.h and devcoredump aren't used in mac80211.c. 
* Remove uneeded empty line Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index b7551610f37e..85ea5a8b5e3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -70,7 +70,6 @@ #include #include #include -#include #include #include #include @@ -86,7 +85,6 @@ #include "testmode.h" #include "iwl-fw-error-dump.h" #include "iwl-prph.h" -#include "iwl-csr.h" #include "iwl-nvm-parse.h" #include "fw-dbg.h" @@ -2142,7 +2140,6 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } - } static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, -- cgit v1.2.3 From 21daff96a90e8bc2dfa57499c8305dbb9430f661 Mon Sep 17 00:00:00 2001 From: Beni Lev Date: Wed, 23 Dec 2015 17:39:57 +0300 Subject: iwlwifi: mvm: Set global RRM capability Allow to publish RRM capabilities without the need to support a minimal capability set. Since some RRM features(e.g. neighbor report) are fw independent, set this capability unconditionally. Signed-off-by: Beni Lev Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 85ea5a8b5e3a..4c2449b0ba86 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -610,6 +610,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; + wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_RRM); + mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP -- cgit v1.2.3 From cee5a882d0273aac0d622368885c9aac676cce33 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Wed, 3 Feb 2016 16:28:25 +0200 Subject: iwlwifi: mvm: forbid U-APSD for P2P Client if the firmware doesn't support it Older versions of the firmware don't support U-APSD for P2P Client. Forbid U-APSD for P2P Client when an old firmware is being used. 
Signed-off-by: Avri Altman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 4c2449b0ba86..86b4b9d81435 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2334,6 +2334,11 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) return; + if (vif->p2p && !iwl_mvm_is_p2p_standalone_uapsd_supported(mvm)) { + vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; + return; + } + if (iwlwifi_mod_params.uapsd_disable) { vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; return; -- cgit v1.2.3 From 283115fb1dcf6b2162e988db98e156b998a5ebb3 Mon Sep 17 00:00:00 2001 From: Avri Altman Date: Sun, 24 Jan 2016 22:16:08 +0200 Subject: iwlwifi: mvm: Send power command on BSS_CHANGED_BEACON_INFO if needed Beacon abort (ba) is set while sending power command, but only after at least one beacon_filter command was successfully sent. If we heard a beacon before starting association, this order is maintained and ba is properly set. However, if the first beacon is received after association, we send the power command upon association, configure the beacon filtering when the first beacon arrives, and in that case, beacon abort is not set. So identify this, and send a power command post the beacon_filter command if needed. Signed-off-by: Avri Altman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 86b4b9d81435..927b0559d42d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -1955,7 +1955,12 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); } - if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS)) { + if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | + /* + * Send power command on every beacon change, + * because we may have not enabled beacon abort yet. + */ + BSS_CHANGED_BEACON_INFO)) { ret = iwl_mvm_power_update_mac(mvm); if (ret) IWL_ERR(mvm, "failed to update power mode\n"); -- cgit v1.2.3 From e29cc6b9c99639d9502bde0693afd3e49ec1ffd7 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 28 Jan 2016 14:25:33 +0200 Subject: iwlwifi: mvm: take care of padded packets To ensure that the SNAP/TCP/IP headers are DW aligned, the firmware may add 2-byte pad at the end of the mac header - after the IV, before the SNAP. In that case the mpdu descriptor pad bit will be turned on. Driver should take it into consideration, and remove the padding before passing the packet to mac80211. Do that. 
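A stand-alone sketch of the copy the fix performs, with simplified lengths and no crypto IV: copy the MAC header, skip the 2-byte pad the firmware inserted after it, then copy the rest of the MPDU contiguously, as mac80211 expects:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAD_LEN 2       /* firmware pad inserted after the MAC header */

/*
 * Copy an MPDU that may contain a 2-byte pad after its header into a
 * contiguous buffer.  Returns the resulting (de-padded) length.
 */
static size_t depad_mpdu(uint8_t *dst, const uint8_t *src, size_t len,
                         size_t hdrlen, int padded)
{
        size_t pad = padded ? PAD_LEN : 0;

        memcpy(dst, src, hdrlen);                       /* header  */
        memcpy(dst + hdrlen, src + hdrlen + pad,        /* payload */
               len - hdrlen - pad);
        return len - pad;
}

int main(void)
{
        /* 4-byte "header", 2-byte pad, 4-byte payload */
        const uint8_t in[] = { 1, 2, 3, 4, 0, 0, 5, 6, 7, 8 };
        uint8_t out[sizeof(in)];
        size_t n = depad_mpdu(out, in, sizeof(in), 4, 1);

        printf("%zu bytes: %d %d\n", n, out[3], out[4]);        /* 8 bytes: 4 5 */
        return 0;
}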
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 30 ++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 590fc6faff04..cd6ca374e5d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -7,7 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH - * Copyright(c) 2015 Intel Deutschland GmbH + * Copyright(c) 2015 - 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -156,7 +156,14 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, u16 len, u8 crypt_len, struct iwl_rx_cmd_buffer *rxb) { - unsigned int hdrlen, fraglen; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; + unsigned int headlen, fraglen, pad_len = 0; + unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control); + + if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) + pad_len = 2; + len -= pad_len; /* If frame is small enough to fit in skb->head, pull it completely. * If not, only pull ieee80211_hdr (including crypto if present, and @@ -170,14 +177,23 @@ static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr, * If the latter changes (there are efforts in the standards group * to do so) we should revisit this and ieee80211_data_to_8023(). */ - hdrlen = (len <= skb_tailroom(skb)) ? len : - sizeof(*hdr) + crypt_len + 8; + headlen = (len <= skb_tailroom(skb)) ? len : + hdrlen + crypt_len + 8; + /* The firmware may align the packet to DWORD. + * The padding is inserted after the IV. + * After copying the header + IV skip the padding if + * present before copying packet data. + */ + hdrlen += crypt_len; memcpy(skb_put(skb, hdrlen), hdr, hdrlen); - fraglen = len - hdrlen; + memcpy(skb_put(skb, headlen - hdrlen), (u8 *)hdr + hdrlen + pad_len, + headlen - hdrlen); + + fraglen = len - headlen; if (fraglen) { - int offset = (void *)hdr + hdrlen - + int offset = (void *)hdr + headlen + pad_len - rxb_addr(rxb) + rxb_offset(rxb); skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, -- cgit v1.2.3 From eea76c364261aa4338f530c09a38afa69af41797 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 21 Feb 2016 16:29:17 +0200 Subject: iwlwifi: mvm: kill iwl_mvm_enable_agg_txq iwl_mvm_enable_agg_txq has only one user. Kill it and adapt the call site. 
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 16 ---------------- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 16 +++++++++++----- 2 files changed, 11 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 21986f63682e..b461b909e7af 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1549,22 +1549,6 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); } -static inline void iwl_mvm_enable_agg_txq(struct iwl_mvm *mvm, int queue, - int mac80211_queue, int fifo, - int sta_id, int tid, int frame_limit, - u16 ssn, unsigned int wdg_timeout) -{ - struct iwl_trans_txq_scd_cfg cfg = { - .fifo = fifo, - .sta_id = sta_id, - .tid = tid, - .frame_limit = frame_limit, - .aggregate = true, - }; - - iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); -} - /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 4717b185f5b0..ef99942d7169 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -1052,9 +1052,16 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; unsigned int wdg_timeout = iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); - int queue, fifo, ret; + int queue, ret; u16 ssn; + struct iwl_trans_txq_scd_cfg cfg = { + .sta_id = mvmsta->sta_id, + .tid = tid, + .frame_limit = buf_size, + .aggregate = true, + }; + BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) != IWL_MAX_TID_COUNT); @@ -1069,11 +1076,10 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, tid_data->amsdu_in_ampdu_allowed = amsdu; spin_unlock_bh(&mvmsta->lock); - fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; + cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - iwl_mvm_enable_agg_txq(mvm, queue, - vif->hw_queue[tid_to_mac80211_ac[tid]], fifo, - mvmsta->sta_id, tid, buf_size, ssn, wdg_timeout); + iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]], + ssn, &cfg, wdg_timeout); ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); if (ret) -- cgit v1.2.3 From 81d90442eac779938217c3444b240aa51fd3db47 Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Sun, 28 Feb 2016 11:04:06 +0300 Subject: Bluetooth: btusb: Add a new AR3012 ID 04ca:3014 T: Bus=01 Lev=01 Prnt=01 Port=04 Cnt=03 Dev#= 5 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=04ca ProdID=3014 Rev=00.02 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb BugLink: https://bugs.launchpad.net/bugs/1546694 Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org --- drivers/bluetooth/ath3k.c | 2 ++ drivers/bluetooth/btusb.c | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index e2ccf906d691..93747389dd28 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -93,6 
+93,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x04CA, 0x300d) }, { USB_DEVICE(0x04CA, 0x300f) }, { USB_DEVICE(0x04CA, 0x3010) }, + { USB_DEVICE(0x04CA, 0x3014) }, { USB_DEVICE(0x0930, 0x0219) }, { USB_DEVICE(0x0930, 0x021c) }, { USB_DEVICE(0x0930, 0x0220) }, @@ -157,6 +158,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 55fbdfc688e3..97f3bba93a8e 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -207,6 +207,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, -- cgit v1.2.3 From 32795a885ac31ccadd45e7886cc2ae31b2d2ef06 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Thu, 25 Feb 2016 10:18:34 +0200 Subject: iwlwifi: mvm: Disable beacon storing in D3 when WOWLAN configured Currently when entering D3 with WOWLAN configured, we enable in the configuration flags beacon storing, and do not disable beacon filtering, and do not wake up from a magic packet. Having both enabled is wrong (should not have both enabled), and causes problems in the RX queues in the FW, causing the FW not to recognize the magic packet when it comes. Disable beacon storing in wowlan configuration. Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 5214482a0403..cf5e6349301e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -856,8 +856,7 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm, wowlan_config_cmd->is_11n_connection = ap_sta->ht_cap.ht_supported; wowlan_config_cmd->flags = ENABLE_L3_FILTERING | - ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING | - ENABLE_STORE_BEACON; + ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING; /* Query the last used seqno and set it */ ret = iwl_mvm_get_last_nonqos_seq(mvm, vif); -- cgit v1.2.3 From 3d2d442236b3f87cf9ff70c4252488ed7c6d5be8 Mon Sep 17 00:00:00 2001 From: Golan Ben-Ami Date: Tue, 23 Feb 2016 10:34:48 +0200 Subject: iwlwifi: support ucode with d0 unified image - regular and usniffer Till today, the ucode consisted of two d0 images - regular, in which the usniffer wasn't enabled, and usniffer, in which the usniffer logs were enabled. Lately, the two images were unified, so there is only one d0 image, in which the usniffer logs are enabled. Add new TLV capability for supporting the consolidated images (set 2, bit 13). 
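The "(set 2, bit 13)" notation maps the flat capability index onto 32-bit words: 77 / 32 = 2 and 77 % 32 = 13. A generic bitmap sketch of that mapping (not the driver's fw_has_capa() implementation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Capability bitmap: one 32-bit word per "set" of 32 TLV capability bits. */
#define CAPA_WORDS 4
static uint32_t capa[CAPA_WORDS];

static void capa_set(unsigned int idx)
{
        capa[idx / 32] |= 1u << (idx % 32);
}

static bool capa_has(unsigned int idx)
{
        return capa[idx / 32] & (1u << (idx % 32));
}

int main(void)
{
        capa_set(77);   /* IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = 77 */

        /* 77 = 2 * 32 + 13, i.e. set 2, bit 13 */
        printf("set %u, bit %u, present=%d\n", 77 / 32, 77 % 32, capa_has(77));
        return 0;
}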
Signed-off-by: Golan Ben-Ami Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 3 ++- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 3 +++ drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 +++- 3 files changed, 8 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index ab4c2a0470b2..ee4ffa599fd8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1033,7 +1033,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, } } - if (usniffer_req && !*usniffer_images) { + if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) && + usniffer_req && !*usniffer_images) { IWL_ERR(drv, "user selected to work with usniffer but usniffer image isn't available in ucode package\n"); return -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 63dc109605e3..5f69bf5e04c7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -322,6 +322,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t; * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature * thresholds reporting * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command + * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in + * regular image. * * @NUM_IWL_UCODE_TLV_CAPA: number of bits used */ @@ -358,6 +360,7 @@ enum iwl_ucode_tlv_capa { IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW = (__force iwl_ucode_tlv_capa_t)74, IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75, IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76, + IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77, NUM_IWL_UCODE_TLV_CAPA #ifdef __CHECKER__ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 05475a2bff90..f075c36a77c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -537,7 +537,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, struct iwl_sf_region st_fwrd_space; if (ucode_type == IWL_UCODE_REGULAR && - iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE)) + iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) && + !(fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED))) fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER); else fw = iwl_get_ucode_image(mvm, ucode_type); -- cgit v1.2.3 From f12d33f4d83c6837d176e1aef337914089c77957 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Sun, 28 Feb 2016 16:49:29 +0900 Subject: 3c59x: Ensure to apply the expires time In commit 5b6490def9168af6a ("3c59x: Use setup_timer()") Amitoj removed add_timer which sets up the epires timer. In this patch the behavior is restore but it uses mod_timer which is a bit more compact. Signed-off-by: Stafford Horne Signed-off-by: David S. 
Miller --- drivers/net/ethernet/3com/3c59x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index c377607e6745..7b881edc0b0a 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1602,7 +1602,7 @@ vortex_up(struct net_device *dev) } setup_timer(&vp->timer, vortex_timer, (unsigned long)dev); - vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait); + mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait)); setup_timer(&vp->rx_oom_timer, rx_oom_timer, (unsigned long)dev); if (vortex_debug > 1) -- cgit v1.2.3 From 34bf1912bfc06bd9200893916078eb0f16480a95 Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Sun, 28 Feb 2016 21:25:19 +0100 Subject: Bluetooth: hci_uart: Add diag and address support for Intel/AG6xx The AG6xx devices behave similar to Wilkens Peak and Stone Peak and with that it is needed to check for Intel default address. In addition it is possible to enable vendor events and diag support. Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg --- drivers/bluetooth/hci_ag6xx.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_ag6xx.c b/drivers/bluetooth/hci_ag6xx.c index ea65c2d089ed..6923d17a022f 100644 --- a/drivers/bluetooth/hci_ag6xx.c +++ b/drivers/bluetooth/hci_ag6xx.c @@ -174,6 +174,9 @@ static int ag6xx_setup(struct hci_uart *hu) bool patched = false; int err; + hu->hdev->set_diag = btintel_set_diag; + hu->hdev->set_bdaddr = btintel_set_bdaddr; + err = btintel_enter_mfg(hdev); if (err) return err; @@ -298,8 +301,16 @@ patch: complete: /* Exit manufacturing mode and reset */ err = btintel_exit_mfg(hdev, true, patched); + if (err) + return err; - return err; + /* Set the event mask for Intel specific vendor events. This enables + * a few extra events that are useful during general operation. + */ + btintel_set_event_mask_mfg(hdev, false); + + btintel_check_bdaddr(hdev); + return 0; } static const struct hci_uart_proto ag6xx_proto = { -- cgit v1.2.3 From 49eb93892d8dfcf5dc5088e640f486d08572d8bf Mon Sep 17 00:00:00 2001 From: "sixiao@microsoft.com" Date: Thu, 25 Feb 2016 15:24:08 -0800 Subject: hv_netvsc: add ethtool support for set and get of settings This patch allows the user to set and retrieve speed and duplex of the hv_netvsc device via ethtool. Example: $ ethtool eth0 Settings for eth0: ... Speed: Unknown! Duplex: Unknown! (255) ... $ ethtool -s eth0 speed 1000 duplex full $ ethtool eth0 Settings for eth0: ... Speed: 1000Mb/s Duplex: Full ... This is based on patches by Roopa Prabhu and Nikolay Aleksandrov. Signed-off-by: Simon Xiao Signed-off-by: David S. 
Miller --- drivers/net/hyperv/hyperv_net.h | 4 +++ drivers/net/hyperv/netvsc_drv.c | 56 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) (limited to 'drivers') diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index fcb92c0d0eb9..b4c68783dfc3 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -658,6 +658,10 @@ struct net_device_context { struct netvsc_stats __percpu *tx_stats; struct netvsc_stats __percpu *rx_stats; + + /* Ethtool settings */ + u8 duplex; + u32 speed; }; /* Per netvsc device */ diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 2b6595e24f43..08608499fa17 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -799,6 +799,58 @@ static int netvsc_set_channels(struct net_device *net, goto do_set; } +static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd) +{ + struct ethtool_cmd diff1 = *cmd; + struct ethtool_cmd diff2 = {}; + + ethtool_cmd_speed_set(&diff1, 0); + diff1.duplex = 0; + /* advertising and cmd are usually set */ + diff1.advertising = 0; + diff1.cmd = 0; + /* We set port to PORT_OTHER */ + diff2.port = PORT_OTHER; + + return !memcmp(&diff1, &diff2, sizeof(diff1)); +} + +static void netvsc_init_settings(struct net_device *dev) +{ + struct net_device_context *ndc = netdev_priv(dev); + + ndc->speed = SPEED_UNKNOWN; + ndc->duplex = DUPLEX_UNKNOWN; +} + +static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct net_device_context *ndc = netdev_priv(dev); + + ethtool_cmd_speed_set(cmd, ndc->speed); + cmd->duplex = ndc->duplex; + cmd->port = PORT_OTHER; + + return 0; +} + +static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct net_device_context *ndc = netdev_priv(dev); + u32 speed; + + speed = ethtool_cmd_speed(cmd); + if (!ethtool_validate_speed(speed) || + !ethtool_validate_duplex(cmd->duplex) || + !netvsc_validate_ethtool_ss_cmd(cmd)) + return -EINVAL; + + ndc->speed = speed; + ndc->duplex = cmd->duplex; + + return 0; +} + static int netvsc_change_mtu(struct net_device *ndev, int mtu) { struct net_device_context *ndevctx = netdev_priv(ndev); @@ -923,6 +975,8 @@ static const struct ethtool_ops ethtool_ops = { .get_channels = netvsc_get_channels, .set_channels = netvsc_set_channels, .get_ts_info = ethtool_op_get_ts_info, + .get_settings = netvsc_get_settings, + .set_settings = netvsc_set_settings, }; static const struct net_device_ops device_ops = { @@ -1115,6 +1169,8 @@ static int netvsc_probe(struct hv_device *dev, netif_set_real_num_tx_queues(net, nvdev->num_chn); netif_set_real_num_rx_queues(net, nvdev->num_chn); + netvsc_init_settings(net); + ret = register_netdev(net); if (ret != 0) { pr_err("Unable to register netdev.\n"); -- cgit v1.2.3 From 87177ba6e47e4e3268b999843399ee1566f682ac Mon Sep 17 00:00:00 2001 From: "Woojung.Huh@microchip.com" Date: Thu, 25 Feb 2016 23:33:07 +0000 Subject: lan78xx: replace devid to chipid & chiprev Replace devid to chipid & chiprev for easy access. Signed-off-by: Woojung Huh Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 20 +++++++++++--------- drivers/net/usb/lan78xx.h | 1 + 2 files changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 1c299b8a162d..4ec25e84f472 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -278,7 +278,8 @@ struct lan78xx_net { int link_on; u8 mdix_ctrl; - u32 devid; + u32 chipid; + u32 chiprev; struct mii_bus *mdiobus; }; @@ -471,7 +472,7 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, */ ret = lan78xx_read_reg(dev, HW_CFG, &val); saved = val; - if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) { + if (dev->chipid == ID_REV_CHIP_ID_7800_) { val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); ret = lan78xx_write_reg(dev, HW_CFG, val); } @@ -505,7 +506,7 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, retval = 0; exit: - if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) + if (dev->chipid == ID_REV_CHIP_ID_7800_) ret = lan78xx_write_reg(dev, HW_CFG, saved); return retval; @@ -539,7 +540,7 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, */ ret = lan78xx_read_reg(dev, HW_CFG, &val); saved = val; - if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) { + if (dev->chipid == ID_REV_CHIP_ID_7800_) { val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); ret = lan78xx_write_reg(dev, HW_CFG, val); } @@ -587,7 +588,7 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, retval = 0; exit: - if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) + if (dev->chipid == ID_REV_CHIP_ID_7800_) ret = lan78xx_write_reg(dev, HW_CFG, saved); return retval; @@ -1555,9 +1556,9 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - switch (dev->devid & ID_REV_CHIP_ID_MASK_) { - case 0x78000000: - case 0x78500000: + switch (dev->chipid) { + case ID_REV_CHIP_ID_7800_: + case ID_REV_CHIP_ID_7850_: /* set to internal PHY id */ dev->mdiobus->phy_mask = ~(1 << 1); break; @@ -1918,7 +1919,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) /* save DEVID for later usage */ ret = lan78xx_read_reg(dev, ID_REV, &buf); - dev->devid = buf; + dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16; + dev->chiprev = buf & ID_REV_CHIP_REV_MASK_; /* Respond to the IN token with a NAK */ ret = lan78xx_read_reg(dev, USB_CFG0, &buf); diff --git a/drivers/net/usb/lan78xx.h b/drivers/net/usb/lan78xx.h index a93fb653e7c5..40927906109a 100644 --- a/drivers/net/usb/lan78xx.h +++ b/drivers/net/usb/lan78xx.h @@ -107,6 +107,7 @@ #define ID_REV_CHIP_ID_MASK_ (0xFFFF0000) #define ID_REV_CHIP_REV_MASK_ (0x0000FFFF) #define ID_REV_CHIP_ID_7800_ (0x7800) +#define ID_REV_CHIP_ID_7850_ (0x7850) #define FPGA_REV (0x04) #define FPGA_REV_MINOR_MASK_ (0x0000FF00) -- cgit v1.2.3 From e270b2dbd83caf091348f609235d3e28f1b754af Mon Sep 17 00:00:00 2001 From: "Woojung.Huh@microchip.com" Date: Thu, 25 Feb 2016 23:33:09 +0000 Subject: lan78xx: remove unnecessary code It is not required after commit cd772de358d6 ("phy: keep pause flags in phy driver features") Signed-off-by: Woojung Huh Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 4ec25e84f472..d1004216d4f3 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -1623,12 +1623,7 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) /* MAC doesn't support 1000T Half */ phydev->supported &= ~SUPPORTED_1000baseT_Half; - phydev->supported |= (SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Full | - SUPPORTED_Pause | SUPPORTED_Asym_Pause); + genphy_config_aneg(phydev); phy_start(phydev); -- cgit v1.2.3 From 349e0c5e2237744d2acc1bdb52526a69c5fdcd52 Mon Sep 17 00:00:00 2001 From: "Woojung.Huh@microchip.com" Date: Thu, 25 Feb 2016 23:33:14 +0000 Subject: lan78xx: add ethtool set & get pause functions Add ethtool operations of set_pauseram and get_pauseparm. Signed-off-by: Woojung Huh Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 80 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 77 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index d1004216d4f3..705c180163c5 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -36,7 +36,7 @@ #define DRIVER_AUTHOR "WOOJUNG HUH " #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.2" +#define DRIVER_VERSION "1.0.3" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@ -281,6 +281,9 @@ struct lan78xx_net { u32 chipid; u32 chiprev; struct mii_bus *mdiobus; + + int fc_autoneg; + u8 fc_request_control; }; /* use ethtool to change the level for any given device */ @@ -902,11 +905,15 @@ static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex, { u32 flow = 0, fct_flow = 0; int ret; + u8 cap; - u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + if (dev->fc_autoneg) + cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); + else + cap = dev->fc_request_control; if (cap & FLOW_CTRL_TX) - flow = (FLOW_CR_TX_FCEN_ | 0xFFFF); + flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF); if (cap & FLOW_CTRL_RX) flow |= FLOW_CR_RX_FCEN_; @@ -1386,6 +1393,62 @@ static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd) return ret; } +static void lan78xx_get_pause(struct net_device *net, + struct ethtool_pauseparam *pause) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct phy_device *phydev = net->phydev; + struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + + phy_ethtool_gset(phydev, &ecmd); + + pause->autoneg = dev->fc_autoneg; + + if (dev->fc_request_control & FLOW_CTRL_TX) + pause->tx_pause = 1; + + if (dev->fc_request_control & FLOW_CTRL_RX) + pause->rx_pause = 1; +} + +static int lan78xx_set_pause(struct net_device *net, + struct ethtool_pauseparam *pause) +{ + struct lan78xx_net *dev = netdev_priv(net); + struct phy_device *phydev = net->phydev; + struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET }; + int ret; + + phy_ethtool_gset(phydev, &ecmd); + + if (pause->autoneg && !ecmd.autoneg) { + ret = -EINVAL; + goto exit; + } + + dev->fc_request_control = 0; + if (pause->rx_pause) + dev->fc_request_control |= FLOW_CTRL_RX; + + if (pause->tx_pause) + dev->fc_request_control |= FLOW_CTRL_TX; + + if (ecmd.autoneg) { + u32 mii_adv; + + ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + mii_adv = 
(u32)mii_advertise_flowctrl(dev->fc_request_control); + ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + phy_ethtool_sset(phydev, &ecmd); + } + + dev->fc_autoneg = pause->autoneg; + + ret = 0; +exit: + return ret; +} + static const struct ethtool_ops lan78xx_ethtool_ops = { .get_link = lan78xx_get_link, .nway_reset = lan78xx_nway_reset, @@ -1404,6 +1467,8 @@ static const struct ethtool_ops lan78xx_ethtool_ops = { .set_wol = lan78xx_set_wol, .get_eee = lan78xx_get_eee, .set_eee = lan78xx_set_eee, + .get_pauseparam = lan78xx_get_pause, + .set_pauseparam = lan78xx_set_pause, }; static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) @@ -1591,6 +1656,7 @@ static void lan78xx_link_status_change(struct net_device *net) static int lan78xx_phy_init(struct lan78xx_net *dev) { int ret; + u32 mii_adv; struct phy_device *phydev = dev->net->phydev; phydev = phy_find_first(dev->mdiobus); @@ -1624,8 +1690,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) /* MAC doesn't support 1000T Half */ phydev->supported &= ~SUPPORTED_1000baseT_Half; + /* support both flow controls */ + dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); + phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); + phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + genphy_config_aneg(phydev); + dev->fc_autoneg = phydev->autoneg; + phy_start(phydev); netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); -- cgit v1.2.3 From 3874d6a8b61966a77aa743b4160ba96bf3081ce5 Mon Sep 17 00:00:00 2001 From: Jeffrey Huang Date: Fri, 26 Feb 2016 03:59:59 -0500 Subject: bnxt_en: Improve bnxt_vf_update_mac(). Allow the VF to setup its own MAC address if the PF has not administratively set it for the VF. To do that, we should always store the MAC address from the firmware. There are 2 cases: 1. The MAC address is valid. This MAC address is assigned by the PF and it needs to override the current VF MAC address. 2. The MAC address is zero. The VF will use a random MAC address by default. By storing this 0 MAC address in the VF structure, it will allow the VF user to change the MAC address later using ndo_set_mac_address() when it sees that the stored MAC address is 0. v2: Expanded descriptions and added more comments. Signed-off-by: Jeffrey Huang Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index c1cc83d7e38c..0b4ca35cee9b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -809,13 +809,19 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) goto update_vf_mac_exit; - if (!is_valid_ether_addr(resp->perm_mac_address)) - goto update_vf_mac_exit; - + /* Store MAC address from the firmware. There are 2 cases: + * 1. MAC address is valid. It is assigned from the PF and we + * need to override the current VF MAC address with it. + * 2. MAC address is zero. The VF will use a random MAC address by + * default but the stored zero MAC will allow the VF user to change + * the random MAC address using ndo_set_mac_address() if he wants. 
+ */ if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr)) memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN); - /* overwrite netdev dev_adr with admin VF MAC */ - memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); + + /* overwrite netdev dev_addr with admin VF MAC */ + if (is_valid_ether_addr(bp->vf.mac_addr)) + memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: mutex_unlock(&bp->hwrm_cmd_lock); } -- cgit v1.2.3 From 19241368443ff976b1924019d29eef8e972158e7 Mon Sep 17 00:00:00 2001 From: Jeffrey Huang Date: Fri, 26 Feb 2016 04:00:00 -0500 Subject: bnxt_en: Send PF driver unload notification to all VFs. During remove_one() when SRIOV is enabled, the PF driver should broadcast PF driver unload notification to all VFs that are attached to VMs. Upon receiving the PF driver unload notification, the VF driver should print a warning message to message log. Certain operations on the VF may not succeed after the PF has unloaded. Signed-off-by: Jeffrey Huang Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 10 ++++-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 43 +++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ff1507f3e226..80c441298e16 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1239,13 +1239,17 @@ static int bnxt_async_event_process(struct bnxt *bp, switch (event_id) { case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event); - schedule_work(&bp->sp_task); + break; + case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: + set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event); break; default: netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n", event_id); - break; + goto async_event_process_exit; } + schedule_work(&bp->sp_task); +async_event_process_exit: return 0; } @@ -5559,6 +5563,8 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp) } } } + if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event)) + netdev_info(bp->dev, "Receive PF driver unload event!"); } #else diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2be51b332652..cc798d5a3527 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -986,6 +986,7 @@ struct bnxt { #define BNXT_VXLAN_DEL_PORT_SP_EVENT 5 #define BNXT_RESET_TASK_SP_EVENT 6 #define BNXT_RST_RING_SP_EVENT 7 +#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 0b4ca35cee9b..9f6b4cc6bd0e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -522,6 +522,46 @@ err_out1: return rc; } +static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, + struct bnxt_vf_info *vf, + u16 event_id) +{ + int rc = 0; + struct hwrm_fwd_async_event_cmpl_input req = {0}; + struct hwrm_fwd_async_event_cmpl_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_async_event_cmpl *async_cmpl; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); + if (vf) + req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + else + /* broadcast this 
async event to all VFs */ + req.encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; + async_cmpl->type = + cpu_to_le16(HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); + async_cmpl->event_id = cpu_to_le16(event_id); + + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + if (rc) { + netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", + rc); + goto fwd_async_event_cmpl_exit; + } + + if (resp->error_code) { + netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl error %d\n", + resp->error_code); + rc = -1; + } + +fwd_async_event_cmpl_exit: + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + void bnxt_sriov_disable(struct bnxt *bp) { u16 num_vfs = pci_num_vf(bp->pdev); @@ -530,6 +570,9 @@ void bnxt_sriov_disable(struct bnxt *bp) return; if (pci_vfs_assigned(bp->pdev)) { + bnxt_hwrm_fwd_async_event_cmpl( + bp, NULL, + HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD); netdev_warn(bp->dev, "Unable to free %d VFs because some are assigned to VMs.\n", num_vfs); } else { -- cgit v1.2.3 From dfb5b894f87cb78168e04283e8d15626dc3e6d5a Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:01 -0500 Subject: bnxt_en: Store irq coalescing timer values in micro seconds. Don't convert these to internal hardware tick values before storing them. This avoids the confusion of ethtool -c returning slightly different values than the ones set using ethtool -C when we convert hardware tick values back to micro seconds. Add better comments for the hardware settings. Also, rename the current set of coalescing fields with rx_ prefix. The next patch will add support of tx coalescing values. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 33 ++++++++++++++--------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 9 +++---- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 21 +++++++-------- 3 files changed, 34 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 80c441298e16..a7103b5cde1c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3532,20 +3532,25 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - /* Each rx completion (2 records) should be DMAed immediately */ - max_buf = min_t(u16, bp->coal_bufs / 4, 2); + /* Each rx completion (2 records) should be DMAed immediately. + * DMA 1/4 of the completion buffers at a time. + */ + max_buf = min_t(u16, bp->rx_coal_bufs / 4, 2); /* max_buf must not be zero */ max_buf = clamp_t(u16, max_buf, 1, 63); - max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63); - buf_tmr = max_t(u16, bp->coal_ticks / 4, 1); - buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1); + max_buf_irq = clamp_t(u16, bp->rx_coal_bufs_irq, 1, 63); + buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks); + /* buf timer set to 1/4 of interrupt timer */ + buf_tmr = max_t(u16, buf_tmr / 4, 1); + buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->rx_coal_ticks_irq); + buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; /* RING_IDLE generates more IRQs for lower latency. Enable it only * if coal_ticks is less than 25 us. 
*/ - if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25) + if (bp->rx_coal_ticks < 25) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; req.flags = cpu_to_le16(flags); @@ -3553,9 +3558,10 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq); req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr); req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq); - req.int_lat_tmr_min = cpu_to_le16(buf_tmr); - req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks); - req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs); + /* Minimum time between 2 interrupts set to buf_tmr x 2 */ + req.int_lat_tmr_min = cpu_to_le16(buf_tmr * 2); + req.int_lat_tmr_max = cpu_to_le16(buf_tmr * 4); + req.num_cmpl_aggr_int = cpu_to_le16(max_buf * 4); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { @@ -5295,10 +5301,11 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE; bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; - bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4); - bp->coal_bufs = 20; - bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1); - bp->coal_bufs_irq = 2; + /* tick values in micro seconds */ + bp->rx_coal_ticks = 4; + bp->rx_coal_bufs = 20; + bp->rx_coal_ticks_irq = 1; + bp->rx_coal_bufs_irq = 2; init_timer(&bp->timer); bp->timer.data = (unsigned long)bp; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index cc798d5a3527..6913307e7612 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -968,13 +968,12 @@ struct bnxt { __le16 vxlan_fw_dst_port_id; u8 nge_port_cnt; __le16 nge_fw_dst_port_id; - u16 coal_ticks; - u16 coal_ticks_irq; - u16 coal_bufs; - u16 coal_bufs_irq; + u16 rx_coal_ticks; + u16 rx_coal_ticks_irq; + u16 rx_coal_bufs; + u16 rx_coal_bufs_irq; #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) -#define BNXT_COAL_TIMER_TO_USEC(x) ((x) * 2 / 25) struct work_struct sp_task; unsigned long sp_event; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 3238817dfd5f..d18a97727394 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -41,12 +41,11 @@ static int bnxt_get_coalesce(struct net_device *dev, memset(coal, 0, sizeof(*coal)); - coal->rx_coalesce_usecs = - max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks), 1); - coal->rx_max_coalesced_frames = bp->coal_bufs / 2; - coal->rx_coalesce_usecs_irq = - max_t(u16, BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks_irq), 1); - coal->rx_max_coalesced_frames_irq = bp->coal_bufs_irq / 2; + coal->rx_coalesce_usecs = bp->rx_coal_ticks; + /* 2 completion records per rx packet */ + coal->rx_max_coalesced_frames = bp->rx_coal_bufs / 2; + coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq; + coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2; return 0; } @@ -57,11 +56,11 @@ static int bnxt_set_coalesce(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); int rc = 0; - bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs); - bp->coal_bufs = coal->rx_max_coalesced_frames * 2; - bp->coal_ticks_irq = - BNXT_USEC_TO_COAL_TIMER(coal->rx_coalesce_usecs_irq); - bp->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; + bp->rx_coal_ticks = coal->rx_coalesce_usecs; + /* 2 completion records per rx packet */ + bp->rx_coal_bufs = coal->rx_max_coalesced_frames * 2; + bp->rx_coal_ticks_irq = 
coal->rx_coalesce_usecs_irq; + bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; if (netif_running(dev)) rc = bnxt_hwrm_set_coal(bp); -- cgit v1.2.3 From bb053f52a54d66a6057c2220458349f7d39ce0d2 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:02 -0500 Subject: bnxt_en: Refactor bnxt_hwrm_set_coal(). Add a function to set all the coalescing parameters. The function can be used later to set both rx and tx coalescing parameters. v2: Fixed function parameters formatting requested by DaveM. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a7103b5cde1c..a391c9c9ab68 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3521,6 +3521,21 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } } +static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, + u32 buf_tmrs, u16 flags, + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req) +{ + req->flags = cpu_to_le16(flags); + req->num_cmpl_dma_aggr = cpu_to_le16((u16)max_bufs); + req->num_cmpl_dma_aggr_during_int = cpu_to_le16(max_bufs >> 16); + req->cmpl_aggr_dma_tmr = cpu_to_le16((u16)buf_tmrs); + req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmrs >> 16); + /* Minimum time between 2 interrupts set to buf_tmr x 2 */ + req->int_lat_tmr_min = cpu_to_le16((u16)buf_tmrs * 2); + req->int_lat_tmr_max = cpu_to_le16((u16)buf_tmrs * 4); + req->num_cmpl_aggr_int = cpu_to_le16((u16)max_bufs * 4); +} + int bnxt_hwrm_set_coal(struct bnxt *bp) { int i, rc = 0; @@ -3553,15 +3568,8 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) if (bp->rx_coal_ticks < 25) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; - req.flags = cpu_to_le16(flags); - req.num_cmpl_dma_aggr = cpu_to_le16(max_buf); - req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq); - req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr); - req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq); - /* Minimum time between 2 interrupts set to buf_tmr x 2 */ - req.int_lat_tmr_min = cpu_to_le16(buf_tmr * 2); - req.int_lat_tmr_max = cpu_to_le16(buf_tmr * 4); - req.num_cmpl_aggr_int = cpu_to_le16(max_buf * 4); + bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, + buf_tmr_irq << 16 | buf_tmr, flags, &req); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { -- cgit v1.2.3 From dfc9c94a83909f4be80e5d0c67e79793830aa312 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:03 -0500 Subject: bnxt_en: Add coalescing support for tx rings. When tx and rx rings don't share the same completion ring, tx coalescing parameters can be set differently from the rx coalescing parameters. Otherwise, use rx coalescing parameters on shared completion rings. Adjust rx coalescing default values to lower interrupt rate. Signed-off-by: Michael Chan Signed-off-by: David S. 
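The helper introduced in the refactor below packs each (normal, during-interrupt) pair of coalescing values into one u32 — low 16 bits for the normal value, high 16 bits for the IRQ-time value — so a single function can later fill both the rx and the tx request. A small stand-alone sketch of that packing convention (the numbers are arbitrary examples):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t max_buf = 7, max_buf_irq = 2;			/* arbitrary example values */
	uint32_t max_bufs = (uint32_t)max_buf_irq << 16 | max_buf;

	/* what bnxt_hwrm_set_coal_params() recovers from the packed word */
	uint16_t num_cmpl_dma_aggr = (uint16_t)max_bufs;		/* low half  -> 7 */
	uint16_t num_cmpl_dma_aggr_during_int = max_bufs >> 16;	/* high half -> 2 */

	printf("%u %u\n", num_cmpl_dma_aggr, num_cmpl_dma_aggr_during_int);
	return 0;
}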
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 42 ++++++++++++++++++----- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 5 +++ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 10 ++++++ 3 files changed, 49 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a391c9c9ab68..198c69d49556 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3539,13 +3539,16 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs, int bnxt_hwrm_set_coal(struct bnxt *bp) { int i, rc = 0; - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, + req_tx = {0}, *req; u16 max_buf, max_buf_irq; u16 buf_tmr, buf_tmr_irq; u32 flags; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, - -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req_rx, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req_tx, + HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); /* Each rx completion (2 records) should be DMAed immediately. * DMA 1/4 of the completion buffers at a time. @@ -3569,13 +3572,31 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE; bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, - buf_tmr_irq << 16 | buf_tmr, flags, &req); + buf_tmr_irq << 16 | buf_tmr, flags, &req_rx); + + /* max_buf must not be zero */ + max_buf = clamp_t(u16, bp->tx_coal_bufs, 1, 63); + max_buf_irq = clamp_t(u16, bp->tx_coal_bufs_irq, 1, 63); + buf_tmr = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks); + /* buf timer set to 1/4 of interrupt timer */ + buf_tmr = max_t(u16, buf_tmr / 4, 1); + buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(bp->tx_coal_ticks_irq); + buf_tmr_irq = max_t(u16, buf_tmr_irq, 1); + + flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET; + bnxt_hwrm_set_coal_params(bp, max_buf_irq << 16 | max_buf, + buf_tmr_irq << 16 | buf_tmr, flags, &req_tx); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < bp->cp_nr_rings; i++) { - req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + struct bnxt_napi *bnapi = bp->bnapi[i]; - rc = _hwrm_send_message(bp, &req, sizeof(req), + req = &req_rx; + if (!bnapi->rx_ring) + req = &req_tx; + req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id); + + rc = _hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); if (rc) break; @@ -5310,11 +5331,16 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE; /* tick values in micro seconds */ - bp->rx_coal_ticks = 4; - bp->rx_coal_bufs = 20; + bp->rx_coal_ticks = 12; + bp->rx_coal_bufs = 30; bp->rx_coal_ticks_irq = 1; bp->rx_coal_bufs_irq = 2; + bp->tx_coal_ticks = 25; + bp->tx_coal_bufs = 30; + bp->tx_coal_ticks_irq = 2; + bp->tx_coal_bufs_irq = 2; + init_timer(&bp->timer); bp->timer.data = (unsigned long)bp; bp->timer.function = bnxt_timer; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6913307e7612..ba67c4a66ef3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -968,10 +968,15 @@ struct bnxt { __le16 vxlan_fw_dst_port_id; u8 nge_port_cnt; __le16 nge_fw_dst_port_id; + u16 rx_coal_ticks; u16 rx_coal_ticks_irq; u16 rx_coal_bufs; u16 rx_coal_bufs_irq; + u16 tx_coal_ticks; + u16 tx_coal_ticks_irq; + u16 tx_coal_bufs; + u16 
tx_coal_bufs_irq; #define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index d18a97727394..a2d25499a73c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -47,6 +47,11 @@ static int bnxt_get_coalesce(struct net_device *dev, coal->rx_coalesce_usecs_irq = bp->rx_coal_ticks_irq; coal->rx_max_coalesced_frames_irq = bp->rx_coal_bufs_irq / 2; + coal->tx_coalesce_usecs = bp->tx_coal_ticks; + coal->tx_max_coalesced_frames = bp->tx_coal_bufs; + coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq; + coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq; + return 0; } @@ -62,6 +67,11 @@ static int bnxt_set_coalesce(struct net_device *dev, bp->rx_coal_ticks_irq = coal->rx_coalesce_usecs_irq; bp->rx_coal_bufs_irq = coal->rx_max_coalesced_frames_irq * 2; + bp->tx_coal_ticks = coal->tx_coalesce_usecs; + bp->tx_coal_bufs = coal->tx_max_coalesced_frames; + bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq; + bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq; + if (netif_running(dev)) rc = bnxt_hwrm_set_coal(bp); -- cgit v1.2.3 From ff4fe81d2d49e3cad3bb45c8c5b9a49ca90ee10b Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:04 -0500 Subject: bnxt_en: Use firmware provided message timeout value. Before this patch, we used a hardcoded value of 500 msec as the default value for firmware message response timeout. For better portability with future hardware or debug platforms, use the value provided by firmware in the first response and store it for all susequent messages. Redefine the macro HWRM_CMD_TIMEOUT to the stored value. Since we don't have the value yet in the first message, use the 500 ms default if the stored value is zero. Signed-off-by: Michael Chan Signed-off-by: David S. 
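The fallback described above is simply "trust the firmware-provided value unless it is zero". A tiny stand-alone sketch of that rule, using the same 500 ms default the driver keeps as DFLT_HWRM_CMD_TIMEOUT:

#include <stdint.h>
#include <stdio.h>

#define DFLT_HWRM_CMD_TIMEOUT	500	/* milliseconds, same default the driver uses */

/* Mirror of the driver's fallback: a zero def_req_timeout from firmware
 * (or the zero stored before the first VER_GET response arrives) means
 * "use the default"; any non-zero firmware value wins.
 */
static int hwrm_timeout(uint16_t def_req_timeout)
{
	return def_req_timeout ? def_req_timeout : DFLT_HWRM_CMD_TIMEOUT;
}

int main(void)
{
	printf("%d %d\n", hwrm_timeout(0), hwrm_timeout(750));	/* prints: 500 750 */
	return 0;
}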
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 7 +++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 +++- 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 198c69d49556..05a93a7efd8c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2638,6 +2638,9 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) /* Ring channel doorbell */ writel(1, bp->bar0 + 0x100); + if (!timeout) + timeout = DFLT_HWRM_CMD_TIMEOUT; + i = 0; if (intr_process) { /* Wait until hwrm response cmpl interrupt is processed */ @@ -3809,6 +3812,10 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); + bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout); + if (!bp->hwrm_cmd_timeout) + bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index ba67c4a66ef3..a3d49406c9c3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -477,7 +477,8 @@ struct rx_tpa_end_cmp_ext { #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) -#define HWRM_CMD_TIMEOUT 500 +#define DFLT_HWRM_CMD_TIMEOUT 500 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) #define HWRM_RESP_ERR_CODE_MASK 0xffff #define HWRM_RESP_LEN_MASK 0xffff0000 @@ -957,6 +958,7 @@ struct bnxt { void *hwrm_dbg_resp_addr; dma_addr_t hwrm_dbg_resp_dma_addr; #define HWRM_DBG_REG_BUF_SIZE 128 + int hwrm_cmd_timeout; struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ struct hwrm_ver_get_output ver_resp; #define FW_VER_STR_LEN 32 -- cgit v1.2.3 From a8643e1604c1f39a675c6b10a7f84260fa13590c Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:05 -0500 Subject: bnxt_en: Fix dmesg log firmware error messages. Use appropriate firmware request header structure to prepare the firmware messages. This avoids the unnecessary conversion of the fields to 32-bit fields. Add appropriate endian conversion when printing out the message fields in dmesg so that they appear correct in the log. Reported-by: Rob Swindell Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 29 +++++++++++-------------- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 15 ++----------- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 4 ++-- 3 files changed, 17 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 05a93a7efd8c..9ff498e32743 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2600,28 +2600,26 @@ alloc_mem_err: void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, u16 cmpl_ring, u16 target_id) { - struct hwrm_cmd_req_hdr *req = request; + struct input *req = request; - req->cmpl_ring_req_type = - cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT)); - req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT); + req->req_type = cpu_to_le16(req_type); + req->cmpl_ring = cpu_to_le16(cmpl_ring); + req->target_id = cpu_to_le16(target_id); req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); } int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) { int i, intr_process, rc; - struct hwrm_cmd_req_hdr *req = msg; + struct input *req = msg; u32 *data = msg; __le32 *resp_len, *valid; u16 cp_ring_id, len = 0; struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++); + req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); memset(resp, 0, PAGE_SIZE); - cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) & - HWRM_CMPL_RING_MASK) >> - HWRM_CMPL_RING_SFT; + cp_ring_id = le16_to_cpu(req->cmpl_ring); intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; /* Write request msg to hwrm channel */ @@ -2632,8 +2630,7 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) /* currently supports only one outstanding message */ if (intr_process) - bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) & - HWRM_SEQ_ID_MASK; + bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); /* Ring channel doorbell */ writel(1, bp->bar0 + 0x100); @@ -2651,7 +2648,7 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - req->cmpl_ring_req_type); + le16_to_cpu(req->req_type)); return -1; } } else { @@ -2667,8 +2664,8 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) if (i >= timeout) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - timeout, req->cmpl_ring_req_type, - req->target_id_seq_id, *resp_len); + timeout, le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), *resp_len); return -1; } @@ -2682,8 +2679,8 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) if (i >= timeout) { netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - timeout, req->cmpl_ring_req_type, - req->target_id_seq_id, len, *valid); + timeout, le16_to_cpu(req->req_type), + le16_to_cpu(req->seq_id), len, *valid); return -1; } } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a3d49406c9c3..3936e966595e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -481,9 +481,11 @@ struct rx_tpa_end_cmp_ext { #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) #define HWRM_RESP_ERR_CODE_MASK 0xffff +#define 
HWRM_RESP_LEN_OFFSET 4 #define HWRM_RESP_LEN_MASK 0xffff0000 #define HWRM_RESP_LEN_SFT 16 #define HWRM_RESP_VALID_MASK 0xff000000 +#define HWRM_SEQ_ID_INVALID -1 #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) @@ -645,19 +647,6 @@ struct bnxt_irq { #define INVALID_STATS_CTX_ID -1 -struct hwrm_cmd_req_hdr { -#define HWRM_CMPL_RING_MASK 0xffff0000 -#define HWRM_CMPL_RING_SFT 16 - __le32 cmpl_ring_req_type; -#define HWRM_SEQ_ID_MASK 0xffff -#define HWRM_SEQ_ID_INVALID -1 -#define HWRM_RESP_LEN_OFFSET 4 -#define HWRM_TARGET_FID_MASK 0xffff0000 -#define HWRM_TARGET_FID_SFT 16 - __le32 target_id_seq_id; - __le64 resp_addr; -}; - struct bnxt_ring_grp_info { u16 fw_stats_ctx; u16 fw_grp_id; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 9f6b4cc6bd0e..0c5f510492f1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -801,8 +801,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) static int bnxt_vf_req_validate_snd(struct bnxt *bp, struct bnxt_vf_info *vf) { int rc = 0; - struct hwrm_cmd_req_hdr *encap_req = vf->hwrm_cmd_req_addr; - u32 req_type = le32_to_cpu(encap_req->cmpl_ring_req_type) & 0xffff; + struct input *encap_req = vf->hwrm_cmd_req_addr; + u32 req_type = le16_to_cpu(encap_req->req_type); switch (req_type) { case HWRM_CFA_L2_FILTER_ALLOC: -- cgit v1.2.3 From 3ebf6f0a09a284adef62111c7cfca29f56d6cce7 Mon Sep 17 00:00:00 2001 From: Rob Swindell Date: Fri, 26 Feb 2016 04:00:06 -0500 Subject: bnxt_en: Add installed-package firmware version reporting via Ethtool GDRVINFO For everything to fit, we remove the PHY microcode version and replace it with the firmware package version in the fw_version string. Signed-off-by: Rob Swindell Signed-off-by: Michael Chan Signed-off-by: David S. 
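The package log that the new ethtool code reads below is a plain-text blob: records are newline separated, fields within a record are tab separated, and the reported version is field index 2 (BNX_PKG_LOG_FIELD_IDX_PKG_VERSION). A stand-alone sketch of that field split over one invented record (the record contents are illustrative only, not a real firmware log):

#include <stdio.h>
#include <string.h>

#define BNX_PKG_LOG_FIELD_IDX_PKG_VERSION	2

int main(void)
{
	/* Invented record: installed-timestamp \t description \t version */
	char rec[] = "20160226\texample package description\t1.2.3";
	char *field = strtok(rec, "\t");
	int idx = 0;

	while (field) {
		if (idx == BNX_PKG_LOG_FIELD_IDX_PKG_VERSION)
			printf("pkg %s\n", field);	/* -> pkg 1.2.3 */
		field = strtok(NULL, "\t");
		idx++;
	}
	return 0;
}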
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 8 +- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 95 +++++++++++++++++++++- drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 14 ++++ 3 files changed, 109 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 9ff498e32743..6e45a00905b9 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3805,7 +3805,7 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) resp->hwrm_intf_upd); netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n"); } - snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d", + snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d/%d.%d.%d", resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld, resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd); @@ -5725,7 +5725,6 @@ static int bnxt_probe_phy(struct bnxt *bp) { int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; - char phy_ver[PHY_VER_STR_LEN]; rc = bnxt_update_link(bp, false); if (rc) { @@ -5745,11 +5744,6 @@ static int bnxt_probe_phy(struct bnxt *bp) link_info->req_duplex = link_info->duplex_setting; link_info->req_flow_ctrl = link_info->force_pause_setting; } - snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", - link_info->phy_ver[0], - link_info->phy_ver[1], - link_info->phy_ver[2]); - strcat(bp->fw_ver_str, phy_ver); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index a2d25499a73c..bfda92ef1778 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -7,6 +7,7 @@ * the Free Software Foundation. 
*/ +#include #include #include #include @@ -20,6 +21,8 @@ #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */ #define FLASH_NVRAM_TIMEOUT ((HWRM_CMD_TIMEOUT) * 100) +static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen); + static u32 bnxt_get_msglevel(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); @@ -469,10 +472,20 @@ static void bnxt_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnxt *bp = netdev_priv(dev); + char *pkglog; + char *pkgver = NULL; + pkglog = kmalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); + if (pkglog) + pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version)); + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) + snprintf(info->fw_version, sizeof(info->fw_version) - 1, + "%s pkg %s", bp->fw_ver_str, pkgver); + else + strlcpy(info->fw_version, bp->fw_ver_str, + sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings; info->testinfo_len = BNXT_NUM_TESTS(bp); @@ -480,6 +493,7 @@ static void bnxt_get_drvinfo(struct net_device *dev, info->eedump_len = 0; /* TODO CHIMP FW: reg dump details */ info->regdump_len = 0; + kfree(pkglog); } static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) @@ -1111,6 +1125,85 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, return rc; } +static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, + u16 ext, u16 *index, u32 *item_length, + u32 *data_length) +{ + struct bnxt *bp = netdev_priv(dev); + int rc; + struct hwrm_nvm_find_dir_entry_input req = {0}; + struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); + req.enables = 0; + req.dir_idx = 0; + req.dir_type = cpu_to_le16(type); + req.dir_ordinal = cpu_to_le16(ordinal); + req.dir_ext = cpu_to_le16(ext); + req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == 0) { + if (index) + *index = le16_to_cpu(output->dir_idx); + if (item_length) + *item_length = le32_to_cpu(output->dir_item_length); + if (data_length) + *data_length = le32_to_cpu(output->dir_data_length); + } + return rc; +} + +static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) +{ + char *retval = NULL; + char *p; + char *value; + int field = 0; + + if (datalen < 1) + return NULL; + /* null-terminate the log data (removing last '\n'): */ + data[datalen - 1] = 0; + for (p = data; *p != 0; p++) { + field = 0; + retval = NULL; + while (*p != 0 && *p != '\n') { + value = p; + while (*p != 0 && *p != '\t' && *p != '\n') + p++; + if (field == desired_field) + retval = value; + if (*p != '\t') + break; + *p = 0; + field++; + p++; + } + if (*p == 0) + break; + *p = 0; + } + return retval; +} + +static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) +{ + u16 index = 0; + u32 datalen; + + if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, + &index, NULL, &datalen) != 0) + return NULL; + + memset(buf, 0, buflen); + if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) + return NULL; + + return 
bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, + datalen); +} + static int bnxt_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h index 3cf3e1b70b64..43ef392c8588 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h @@ -50,10 +50,24 @@ enum bnxt_nvm_directory_type { #define BNX_DIR_ORDINAL_FIRST 0 +#define BNX_DIR_EXT_NONE 0 #define BNX_DIR_EXT_INACTIVE (1 << 0) #define BNX_DIR_EXT_UPDATE (1 << 1) +#define BNX_DIR_ATTR_NONE 0 #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) +#define BNX_PKG_LOG_MAX_LENGTH 4096 + +enum bnxnvm_pkglog_field_index { + BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, + BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, + BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2, + BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3, + BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5, + BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6 +}; + #endif /* Don't add anything after this line */ -- cgit v1.2.3 From fbfbc4851dd709cf1327afc283f9cca00235dcb3 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:07 -0500 Subject: bnxt_en: Refactor _hwrm_send_message(). Add a new function bnxt_do_send_msg() to do essentially the same thing with an additional paramter to silence error response messages. All current callers will set silent to false. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6e45a00905b9..48a7bbc7cc25 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2608,7 +2608,8 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); } -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, + int timeout, bool silent) { int i, intr_process, rc; struct input *req = msg; @@ -2686,13 +2687,16 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) } rc = le16_to_cpu(resp->error_code); - if (rc) { + if (rc && !silent) netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", le16_to_cpu(resp->req_type), le16_to_cpu(resp->seq_id), rc); - return rc; - } - return 0; + return rc; +} + +int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) +{ + return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); } int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -- cgit v1.2.3 From 90e209213096110bce06ef580e1c73702fe4a288 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Fri, 26 Feb 2016 04:00:08 -0500 Subject: bnxt_en: Add hwrm_send_message_silent(). This is used to send NVM_FIND_DIR_ENTRY messages which can return error if the entry is not found. This is normal and the error message will cause unnecessary alarm, so silence it. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 11 +++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1 + drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 48a7bbc7cc25..aa6a3189caca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2709,6 +2709,17 @@ int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) return rc; } +int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, + int timeout) +{ + int rc; + + mutex_lock(&bp->hwrm_cmd_lock); + rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) { struct hwrm_func_drv_rgtr_input req = {0}; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3936e966595e..9aa38f57601b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1095,6 +1095,7 @@ void bnxt_set_ring_params(struct bnxt *); void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); int _hwrm_send_message(struct bnxt *, void *, u32, int); int hwrm_send_message(struct bnxt *, void *, u32, int); +int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_set_coal(struct bnxt *); int bnxt_hwrm_func_qcaps(struct bnxt *); int bnxt_hwrm_set_pause(struct bnxt *); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index bfda92ef1778..84ea26d6f3ff 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1141,7 +1141,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, req.dir_ordinal = cpu_to_le16(ordinal); req.dir_ext = cpu_to_le16(ext); req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); -- cgit v1.2.3 From eaea34b23c46bf17b4a5638be69ab3561854f34b Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 26 Feb 2016 10:45:40 +0100 Subject: net/tun: implement ndo_set_rx_headroom ndo_set_rx_headroom controls the align value used by tun devices to allocate skbs on frame reception. When the xmit device adds a large encapsulation, this avoids an skb head reallocation on forwarding. The measured improvement when forwarding towards a vxlan dev with frame size below the egress device MTU is as follow: vxlan over ipv6, bridged: +6% vxlan over ipv6, ovs: +7% In case of ipv4 tunnels there is no improvement, since the tun device default alignment provides enough headroom to avoid the skb head reallocation. Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- drivers/net/tun.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 88bb8cc3555b..afdf950617c3 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -187,6 +187,7 @@ struct tun_struct { #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ NETIF_F_TSO6|NETIF_F_UFO) + int align; int vnet_hdr_sz; int sndbuf; struct tap_filter txflt; @@ -934,6 +935,17 @@ static void tun_poll_controller(struct net_device *dev) return; } #endif + +static void tun_set_headroom(struct net_device *dev, int new_hr) +{ + struct tun_struct *tun = netdev_priv(dev); + + if (new_hr < NET_SKB_PAD) + new_hr = NET_SKB_PAD; + + tun->align = new_hr; +} + static const struct net_device_ops tun_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -945,6 +957,7 @@ static const struct net_device_ops tun_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = tun_poll_controller, #endif + .ndo_set_rx_headroom = tun_set_headroom, }; static const struct net_device_ops tap_netdev_ops = { @@ -962,6 +975,7 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_poll_controller = tun_poll_controller, #endif .ndo_features_check = passthru_features_check, + .ndo_set_rx_headroom = tun_set_headroom, }; static void tun_flow_init(struct tun_struct *tun) @@ -1086,7 +1100,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; struct sk_buff *skb; size_t total_len = iov_iter_count(from); - size_t len = total_len, align = NET_SKB_PAD, linear; + size_t len = total_len, align = tun->align, linear; struct virtio_net_hdr gso = { 0 }; int good_linear; int copylen; @@ -1694,6 +1708,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->txflt.count = 0; tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); + tun->align = NET_SKB_PAD; tun->filter_attached = false; tun->sndbuf = tfile->socket.sk->sk_sndbuf; -- cgit v1.2.3 From 163e529200af7b7521fbde5dbcc653cf3ce597df Mon Sep 17 00:00:00 2001 From: Paolo Abeni Date: Fri, 26 Feb 2016 10:45:41 +0100 Subject: veth: implement ndo_set_rx_headroom The rx headroom for veth dev is the peer device needed_headroom. Avoid ping-pong updates setting the private flag IFF_PHONY_HEADROOM. This avoids skb head reallocation when forwarding from a veth dev towards a device adding some kind of encapsulation. When transmitting frames below the MTU size towards a vxlan device, this gives about 10% performance speed-up when OVS is used to connect the veth and the vxlan device and a little more when using a plain Linux bridge. Signed-off-by: Paolo Abeni Signed-off-by: David S. 
Miller --- drivers/net/veth.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ba21d072be31..4f30a6ae50d0 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -35,6 +35,7 @@ struct pcpu_vstats { struct veth_priv { struct net_device __rcu *peer; atomic64_t dropped; + unsigned requested_headroom; }; /* @@ -271,6 +272,29 @@ static int veth_get_iflink(const struct net_device *dev) return iflink; } +static void veth_set_rx_headroom(struct net_device *dev, int new_hr) +{ + struct veth_priv *peer_priv, *priv = netdev_priv(dev); + struct net_device *peer; + + if (new_hr < 0) + new_hr = 0; + + rcu_read_lock(); + peer = rcu_dereference(priv->peer); + if (unlikely(!peer)) + goto out; + + peer_priv = netdev_priv(peer); + priv->requested_headroom = new_hr; + new_hr = max(priv->requested_headroom, peer_priv->requested_headroom); + dev->needed_headroom = new_hr; + peer->needed_headroom = new_hr; + +out: + rcu_read_unlock(); +} + static const struct net_device_ops veth_netdev_ops = { .ndo_init = veth_dev_init, .ndo_open = veth_open, @@ -285,6 +309,7 @@ static const struct net_device_ops veth_netdev_ops = { #endif .ndo_get_iflink = veth_get_iflink, .ndo_features_check = passthru_features_check, + .ndo_set_rx_headroom = veth_set_rx_headroom, }; #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ @@ -301,6 +326,7 @@ static void veth_setup(struct net_device *dev) dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; dev->priv_flags |= IFF_NO_QUEUE; + dev->priv_flags |= IFF_PHONY_HEADROOM; dev->netdev_ops = &veth_netdev_ops; dev->ethtool_ops = &veth_ethtool_ops; -- cgit v1.2.3 From 2b6ab0d3aae6bf1e08118060b0c5565778cd6b21 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Fri, 26 Feb 2016 07:54:13 -0800 Subject: net: cls_u32: move TC offload feature bit into cls_u32 offload logic In the original series drivers would get offload requests for cls_u32 rules even if the feature bit is disabled. This meant the driver had to do a boiler plate check on the feature bit before adding/deleting the rule. This patch lifts the check into the core code and removes it from the driver specific case. Signed-off-by: John Fastabend Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3 --- include/net/pkt_cls.h | 3 +++ 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index cf4b729c92d7..b893ff8e65f5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8400,9 +8400,6 @@ int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) && tc->type == TC_SETUP_CLSU32) { - if (!(dev->features & NETIF_F_HW_TC)) - return -EINVAL; - switch (tc->cls_u32->command) { case TC_CLSU32_NEW_KNODE: case TC_CLSU32_REPLACE_KNODE: diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index e64d20b81047..6096e96fb78b 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -394,6 +394,9 @@ struct tc_cls_u32_offload { static inline bool tc_should_offload(struct net_device *dev) { + if (!(dev->features & NETIF_F_HW_TC)) + return false; + return dev->netdev_ops->ndo_setup_tc; } -- cgit v1.2.3 From 09d4d087cd4869859fcc5dfc692f0830550a1b48 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 Feb 2016 17:32:24 +0100 Subject: mlx4: Implement devlink interface Implement newly introduced devlink interface. Add devlink port instances for every port and set the port types accordingly. Signed-off-by: Jiri Pirko v2->v3: -add dev param to devlink_register (api change) Signed-off-by: David S. Miller --- drivers/infiniband/hw/mlx4/main.c | 7 ++++ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8 ++++- drivers/net/ethernet/mellanox/mlx4/intf.c | 9 ++++++ drivers/net/ethernet/mellanox/mlx4/main.c | 44 +++++++++++++++++++------- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2 ++ include/linux/mlx4/driver.h | 3 ++ 6 files changed, 60 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1c7ab6cabbb8..a15a7b37d386 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -2519,6 +2520,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) } ibdev->ib_active = true; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i), + &ibdev->ib_dev); if (mlx4_is_mfunc(ibdev->dev)) init_pkeys(ibdev); @@ -2643,7 +2647,10 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) { struct mlx4_ib_dev *ibdev = ibdev_ptr; int p; + int i; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) + devlink_port_type_clear(mlx4_get_devlink_port(dev, i)); ibdev->ib_active = false; flush_workqueue(wq); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 96d95cb36c52..e26b110e27da 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -2033,8 +2034,11 @@ void mlx4_en_destroy_netdev(struct net_device *dev) en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); /* Unregister device - this will close the port if it was up */ - if (priv->registered) + if (priv->registered) { + devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev, + priv->port)); unregister_netdev(dev); + } if (priv->allocated) mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); @@ -3051,6 +3055,8 @@ int 
mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } priv->registered = 1; + devlink_port_type_eth_set(mlx4_get_devlink_port(mdev->dev, priv->port), + dev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 0472941af820..dec77d6f0ac9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -34,6 +34,7 @@ #include #include #include +#include #include "mlx4.h" @@ -249,3 +250,11 @@ void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int return result; } EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev); + +struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port) +{ + struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; + + return &info->devlink_port; +} +EXPORT_SYMBOL_GPL(mlx4_get_devlink_port); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 2cc3c626c3fe..4f5cfe4989ce 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -2881,8 +2882,13 @@ no_msi: static int mlx4_init_port_info(struct mlx4_dev *dev, int port) { + struct devlink *devlink = priv_to_devlink(mlx4_priv(dev)); struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; - int err = 0; + int err; + + err = devlink_port_register(devlink, &info->devlink_port, port); + if (err) + return err; info->dev = dev; info->port = port; @@ -2907,6 +2913,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port) err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); if (err) { mlx4_err(dev, "Failed to create file for port %d\n", port); + devlink_port_unregister(&info->devlink_port); info->port = -1; } @@ -3680,21 +3687,23 @@ err_disable_pdev: static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { + struct devlink *devlink; struct mlx4_priv *priv; struct mlx4_dev *dev; int ret; printk_once(KERN_INFO "%s", mlx4_version); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) + devlink = devlink_alloc(NULL, sizeof(*priv)); + if (!devlink) return -ENOMEM; + priv = devlink_priv(devlink); dev = &priv->dev; dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); if (!dev->persist) { - kfree(priv); - return -ENOMEM; + ret = -ENOMEM; + goto err_devlink_free; } dev->persist->pdev = pdev; dev->persist->dev = dev; @@ -3703,14 +3712,23 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->device_state_mutex); mutex_init(&dev->persist->interface_state_mutex); + ret = devlink_register(devlink, &pdev->dev); + if (ret) + goto err_persist_free; + ret = __mlx4_init_one(pdev, id->driver_data, priv); - if (ret) { - kfree(dev->persist); - kfree(priv); - } else { - pci_save_state(pdev); - } + if (ret) + goto err_devlink_unregister; + pci_save_state(pdev); + return 0; + +err_devlink_unregister: + devlink_unregister(devlink); +err_persist_free: + kfree(dev->persist); +err_devlink_free: + devlink_free(devlink); return ret; } @@ -3811,6 +3829,7 @@ static void mlx4_remove_one(struct pci_dev *pdev) struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); struct mlx4_dev *dev = persist->dev; struct mlx4_priv *priv = mlx4_priv(dev); + struct devlink *devlink = priv_to_devlink(priv); int active_vfs = 0; mutex_lock(&persist->interface_state_mutex); @@ -3841,8 +3860,9 @@ static void mlx4_remove_one(struct pci_dev *pdev) 
pci_release_regions(pdev); pci_disable_device(pdev); + devlink_unregister(devlink); kfree(dev->persist); - kfree(priv); + devlink_free(devlink); pci_set_drvdata(pdev, NULL); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 7baef52db6b7..ef9683101ead 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -828,6 +829,7 @@ struct mlx4_port_info { struct mlx4_roce_gid_table gid_table; int base_qpn; struct cpu_rmap *rmap; + struct devlink_port devlink_port; }; struct mlx4_sense { diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h index 2e8af001c5da..bd0e7075ea6d 100644 --- a/include/linux/mlx4/driver.h +++ b/include/linux/mlx4/driver.h @@ -33,6 +33,7 @@ #ifndef MLX4_DRIVER_H #define MLX4_DRIVER_H +#include #include struct mlx4_dev; @@ -89,6 +90,8 @@ int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p); void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port); +struct devlink_port *mlx4_get_devlink_port(struct mlx4_dev *dev, int port); + static inline u64 mlx4_mac_to_u64(u8 *addr) { u64 mac = 0; -- cgit v1.2.3 From b2facd95ab965465373fc5ded7218c8c4fa8da7d Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 Feb 2016 17:32:25 +0100 Subject: mlx4: Implement port type setting via devlink interface So far, there has been an mlx4-specific sysfs file allowing user to change port type to either Ethernet of InfiniBand. This is very inconvenient. Allow to expose the same ability to set port type in a generic way using devlink interface. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 86 +++++++++++++++++++++++-------- 1 file changed, 65 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 4f5cfe4989ce..b8a51515e73c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1082,36 +1082,20 @@ static ssize_t show_port_type(struct device *dev, return strlen(buf); } -static ssize_t set_port_type(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static int __set_port_type(struct mlx4_port_info *info, + enum mlx4_port_type port_type) { - struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, - port_attr); struct mlx4_dev *mdev = info->dev; struct mlx4_priv *priv = mlx4_priv(mdev); enum mlx4_port_type types[MLX4_MAX_PORTS]; enum mlx4_port_type new_types[MLX4_MAX_PORTS]; - static DEFINE_MUTEX(set_port_type_mutex); int i; int err = 0; - mutex_lock(&set_port_type_mutex); - - if (!strcmp(buf, "ib\n")) - info->tmp_type = MLX4_PORT_TYPE_IB; - else if (!strcmp(buf, "eth\n")) - info->tmp_type = MLX4_PORT_TYPE_ETH; - else if (!strcmp(buf, "auto\n")) - info->tmp_type = MLX4_PORT_TYPE_AUTO; - else { - mlx4_err(mdev, "%s is not supported port type\n", buf); - err = -EINVAL; - goto err_out; - } - mlx4_stop_sense(mdev); mutex_lock(&priv->port_mutex); + info->tmp_type = port_type; + /* Possible type is always the one that was delivered */ mdev->caps.possible_type[info->port] = info->tmp_type; @@ -1153,6 +1137,37 @@ static ssize_t set_port_type(struct device *dev, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); + + return err; +} + +static ssize_t set_port_type(struct device *dev, + struct 
device_attribute *attr, + const char *buf, size_t count) +{ + struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, + port_attr); + struct mlx4_dev *mdev = info->dev; + enum mlx4_port_type port_type; + static DEFINE_MUTEX(set_port_type_mutex); + int err; + + mutex_lock(&set_port_type_mutex); + + if (!strcmp(buf, "ib\n")) { + port_type = MLX4_PORT_TYPE_IB; + } else if (!strcmp(buf, "eth\n")) { + port_type = MLX4_PORT_TYPE_ETH; + } else if (!strcmp(buf, "auto\n")) { + port_type = MLX4_PORT_TYPE_AUTO; + } else { + mlx4_err(mdev, "%s is not supported port type\n", buf); + err = -EINVAL; + goto err_out; + } + + err = __set_port_type(info, port_type); + err_out: mutex_unlock(&set_port_type_mutex); @@ -3685,6 +3700,35 @@ err_disable_pdev: return err; } +static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port, + enum devlink_port_type port_type) +{ + struct mlx4_port_info *info = container_of(devlink_port, + struct mlx4_port_info, + devlink_port); + enum mlx4_port_type mlx4_port_type; + + switch (port_type) { + case DEVLINK_PORT_TYPE_AUTO: + mlx4_port_type = MLX4_PORT_TYPE_AUTO; + break; + case DEVLINK_PORT_TYPE_ETH: + mlx4_port_type = MLX4_PORT_TYPE_ETH; + break; + case DEVLINK_PORT_TYPE_IB: + mlx4_port_type = MLX4_PORT_TYPE_IB; + break; + default: + return -EOPNOTSUPP; + } + + return __set_port_type(info, mlx4_port_type); +} + +static const struct devlink_ops mlx4_devlink_ops = { + .port_type_set = mlx4_devlink_port_type_set, +}; + static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) { struct devlink *devlink; @@ -3694,7 +3738,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) printk_once(KERN_INFO "%s", mlx4_version); - devlink = devlink_alloc(NULL, sizeof(*priv)); + devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv)); if (!devlink) return -ENOMEM; priv = devlink_priv(devlink); -- cgit v1.2.3 From c4745500e988b393189703bade6c3ec65194e24c Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 Feb 2016 17:32:26 +0100 Subject: mlxsw: Implement devlink interface Implement newly introduced devlink interface. Add devlink port instances for every port and set the port types accordingly. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 23 +++++++++++++++++------ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 19 +++++++++++++++++++ drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 20 ++++++++++++++++++++ 4 files changed, 58 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 22379eb8e924..a4026a4ef893 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -56,6 +56,7 @@ #include #include #include +#include #include "core.h" #include "item.h" @@ -791,6 +792,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const char *device_kind = mlxsw_bus_info->device_kind; struct mlxsw_core *mlxsw_core; struct mlxsw_driver *mlxsw_driver; + struct devlink *devlink; size_t alloc_size; int err; @@ -798,12 +800,13 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (!mlxsw_driver) return -EINVAL; alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; - mlxsw_core = kzalloc(alloc_size, GFP_KERNEL); - if (!mlxsw_core) { + devlink = devlink_alloc(NULL, alloc_size); + if (!devlink) { err = -ENOMEM; - goto err_core_alloc; + goto err_devlink_alloc; } + mlxsw_core = devlink_priv(devlink); INIT_LIST_HEAD(&mlxsw_core->rx_listener_list); INIT_LIST_HEAD(&mlxsw_core->event_listener_list); mlxsw_core->driver = mlxsw_driver; @@ -841,6 +844,10 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (err) goto err_hwmon_init; + err = devlink_register(devlink, mlxsw_bus_info->dev); + if (err) + goto err_devlink_register; + err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core, mlxsw_bus_info); if (err) @@ -855,6 +862,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, err_debugfs_init: mlxsw_core->driver->fini(mlxsw_core->driver_priv); err_driver_init: + devlink_unregister(devlink); +err_devlink_register: err_hwmon_init: mlxsw_emad_fini(mlxsw_core); err_emad_init: @@ -864,8 +873,8 @@ err_bus_init: err_alloc_lag_mapping: free_percpu(mlxsw_core->pcpu_stats); err_alloc_stats: - kfree(mlxsw_core); -err_core_alloc: + devlink_free(devlink); +err_devlink_alloc: mlxsw_core_driver_put(device_kind); return err; } @@ -874,14 +883,16 @@ EXPORT_SYMBOL(mlxsw_core_bus_device_register); void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core) { const char *device_kind = mlxsw_core->bus_info->device_kind; + struct devlink *devlink = priv_to_devlink(mlxsw_core); mlxsw_core_debugfs_fini(mlxsw_core); mlxsw_core->driver->fini(mlxsw_core->driver_priv); + devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); mlxsw_core->bus->fini(mlxsw_core->bus_priv); kfree(mlxsw_core->lag.mapping); free_percpu(mlxsw_core->pcpu_stats); - kfree(mlxsw_core); + devlink_free(devlink); mlxsw_core_driver_put(device_kind); } EXPORT_SYMBOL(mlxsw_core_bus_device_unregister); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 09ce451c283b..20e67835aae7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include @@ -1351,7 +1352,9 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) { + struct devlink *devlink 
= priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_port *mlxsw_sp_port; + struct devlink_port *devlink_port; struct net_device *dev; bool usable; size_t bytes; @@ -1417,6 +1420,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) goto port_not_usable; } + devlink_port = &mlxsw_sp_port->devlink_port; + err = devlink_port_register(devlink, devlink_port, local_port); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n", + mlxsw_sp_port->local_port); + goto err_devlink_port_register; + } + err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1457,6 +1468,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) goto err_register_netdev; } + devlink_port_type_eth_set(devlink_port, dev); + err = mlxsw_sp_port_vlan_init(mlxsw_sp_port); if (err) goto err_port_vlan_init; @@ -1472,6 +1485,8 @@ err_port_admin_status_set: err_port_mtu_set: err_port_swid_set: err_port_system_port_mapping_set: + devlink_port_unregister(&mlxsw_sp_port->devlink_port); +err_devlink_port_register: port_not_usable: err_port_module_check: err_dev_addr_init: @@ -1505,10 +1520,14 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port]; + struct devlink_port *devlink_port; if (!mlxsw_sp_port) return; + devlink_port = &mlxsw_sp_port->devlink_port; + devlink_port_type_clear(devlink_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ + devlink_port_unregister(devlink_port); mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); free_percpu(mlxsw_sp_port->pcpu_stats); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3b89ed2f3c76..6bf6daca8643 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -43,6 +43,7 @@ #include #include #include +#include #include "port.h" #include "core.h" @@ -162,6 +163,7 @@ struct mlxsw_sp_port { unsigned long *untagged_vlans; /* VLAN interfaces */ struct list_head vports_list; + struct devlink_port devlink_port; }; static inline struct mlxsw_sp_port * diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c index d85960cfb694..7a60a26759b6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -78,6 +79,7 @@ struct mlxsw_sx_port { struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats; struct mlxsw_sx *mlxsw_sx; u8 local_port; + struct devlink_port devlink_port; }; /* tx_hdr_version @@ -953,7 +955,9 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port, static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) { + struct devlink *devlink = priv_to_devlink(mlxsw_sx->core); struct mlxsw_sx_port *mlxsw_sx_port; + struct devlink_port *devlink_port; struct net_device *dev; bool usable; int err; @@ -1007,6 +1011,14 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto port_not_usable; } + devlink_port = &mlxsw_sx_port->devlink_port; + err = devlink_port_register(devlink, devlink_port, local_port); + if (err) { + 
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n", + mlxsw_sx_port->local_port); + goto err_devlink_port_register; + } + err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port); if (err) { dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n", @@ -1064,6 +1076,8 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port) goto err_register_netdev; } + devlink_port_type_eth_set(devlink_port, dev); + mlxsw_sx->ports[local_port] = mlxsw_sx_port; return 0; @@ -1075,6 +1089,8 @@ err_port_mtu_set: err_port_speed_set: err_port_swid_set: err_port_system_port_mapping_set: + devlink_port_unregister(&mlxsw_sx_port->devlink_port); +err_devlink_port_register: port_not_usable: err_port_module_check: err_dev_addr_get: @@ -1087,11 +1103,15 @@ err_alloc_stats: static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port) { struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port]; + struct devlink_port *devlink_port; if (!mlxsw_sx_port) return; + devlink_port = &mlxsw_sx_port->devlink_port; + devlink_port_type_clear(devlink_port); unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */ mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT); + devlink_port_unregister(devlink_port); free_percpu(mlxsw_sx_port->pcpu_stats); free_netdev(mlxsw_sx_port->dev); } -- cgit v1.2.3 From 284ef8035708334df99d038da1d3c7ddf77ec1bd Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Fri, 26 Feb 2016 17:32:27 +0100 Subject: mlxsw: core: Add devlink port splitter callbacks Add middle layer in mlxsw core code to forward port split/unsplit calls into specific ASIC drivers. Signed-off-by: Jiri Pirko Signed-off-by: Ido Schimmel Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/core.c | 34 +++++++++++++++++++++++++++++- drivers/net/ethernet/mellanox/mlxsw/core.h | 2 ++ 2 files changed, 35 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index a4026a4ef893..f69f6280519f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -785,6 +785,38 @@ static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core) debugfs_remove_recursive(mlxsw_core->dbg_dir); } +static int mlxsw_devlink_port_split(struct devlink *devlink, + unsigned int port_index, + unsigned int count) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + if (port_index >= MLXSW_PORT_MAX_PORTS) + return -EINVAL; + if (!mlxsw_core->driver->port_split) + return -EOPNOTSUPP; + return mlxsw_core->driver->port_split(mlxsw_core->driver_priv, + port_index, count); +} + +static int mlxsw_devlink_port_unsplit(struct devlink *devlink, + unsigned int port_index) +{ + struct mlxsw_core *mlxsw_core = devlink_priv(devlink); + + if (port_index >= MLXSW_PORT_MAX_PORTS) + return -EINVAL; + if (!mlxsw_core->driver->port_unsplit) + return -EOPNOTSUPP; + return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv, + port_index); +} + +static const struct devlink_ops mlxsw_devlink_ops = { + .port_split = mlxsw_devlink_port_split, + .port_unsplit = mlxsw_devlink_port_unsplit, +}; + int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, const struct mlxsw_bus *mlxsw_bus, void *bus_priv) @@ -800,7 +832,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (!mlxsw_driver) return -EINVAL; alloc_size = sizeof(*mlxsw_core) + 
mlxsw_driver->priv_size; - devlink = devlink_alloc(NULL, alloc_size); + devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size); if (!devlink) { err = -ENOMEM; goto err_devlink_alloc; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index a01723600f0a..c73d1c0792a6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -186,6 +186,8 @@ struct mlxsw_driver { int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info); void (*fini)(void *driver_priv); + int (*port_split)(void *driver_priv, u8 local_port, unsigned int count); + int (*port_unsplit)(void *driver_priv, u8 local_port); void (*txhdr_construct)(struct sk_buff *skb, const struct mlxsw_tx_info *tx_info); u8 txhdr_len; -- cgit v1.2.3 From 3e9b27b8fc8b00c9edaaa5fd64636e6f2b331f43 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 26 Feb 2016 17:32:28 +0100 Subject: mlxsw: spectrum: Unmap local port from module during teardown When splitting a port we replace it with 2 or 4 other ports. To be able to do that we need to remove the original port netdev and unmap it from its module. However, we first mark it as disabled, as active ports cannot be unmapped. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 20e67835aae7..0dd72f85791e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -320,6 +320,15 @@ static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, return 0; } +static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + mlxsw_reg_pmlp_width_set(pmlp_pl, 0); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); +} + static int mlxsw_sp_port_open(struct net_device *dev) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); @@ -1530,6 +1539,8 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) devlink_port_unregister(devlink_port); mlxsw_sp_port_vports_fini(mlxsw_sp_port); mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); + mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT); + mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port); free_percpu(mlxsw_sp_port->pcpu_stats); kfree(mlxsw_sp_port->untagged_vlans); kfree(mlxsw_sp_port->active_vlans); -- cgit v1.2.3 From 558c2d5e52142fc503f79bec89413724cfc37bdd Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 26 Feb 2016 17:32:29 +0100 Subject: mlxsw: spectrum: Store local port to module mapping during init The port netdevs are each associated with a different local port number in the device. These local ports are grouped into groups of 4 (e.g. (1-4), (5-8)) called clusters. The cluster constitutes the one of two possible modules they can be mapped to. This mapping is board-specific and done by the device's firmware during init. When splitting a port by 4, the device requires us to first unmap all the ports in the cluster and then map each to a single lane in the module associated with the port netdev used as the handle for the operation. 
This means that two port netdevs will disappear, as only 100Gb/s (4 lanes) ports can be split and we are guaranteed to have two of these ((1, 3), (5, 7) etc.) in a cluster. When unsplit occurs we need to reinstantiate the two original 100Gb/s ports and map each to its original module. Therefore, during driver init store the initial local port to module mapping, so it can be used later during unsplitting. Note that a by 2 split doesn't require us to store the mapping, as we only need to reinstantiate one port whose module is known. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 36 +++++++++++--------------- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 1 + 2 files changed, 16 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 0dd72f85791e..240881c8dec2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -305,18 +305,19 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); } -static int mlxsw_sp_port_module_check(struct mlxsw_sp_port *mlxsw_sp_port, - bool *p_usable) +static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, + u8 local_port, u8 *p_module, + u8 *p_width) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmlp_pl[MLXSW_REG_PMLP_LEN]; int err; - mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port); + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); if (err) return err; - *p_usable = mlxsw_reg_pmlp_width_get(pmlp_pl) ?
true : false; + *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0); + *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl); return 0; } @@ -1365,7 +1366,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) struct mlxsw_sp_port *mlxsw_sp_port; struct devlink_port *devlink_port; struct net_device *dev; - bool usable; size_t bytes; int err; @@ -1416,19 +1416,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) */ dev->hard_header_len += MLXSW_TXHDR_LEN; - err = mlxsw_sp_port_module_check(mlxsw_sp_port, &usable); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to check module\n", - mlxsw_sp_port->local_port); - goto err_port_module_check; - } - - if (!usable) { - dev_dbg(mlxsw_sp->bus_info->dev, "Port %d: Not usable, skipping initialization\n", - mlxsw_sp_port->local_port); - goto port_not_usable; - } - devlink_port = &mlxsw_sp_port->devlink_port; err = devlink_port_register(devlink, devlink_port, local_port); if (err) { @@ -1496,8 +1483,6 @@ err_port_swid_set: err_port_system_port_mapping_set: devlink_port_unregister(&mlxsw_sp_port->devlink_port); err_devlink_port_register: -port_not_usable: -err_port_module_check: err_dev_addr_init: free_percpu(mlxsw_sp_port->pcpu_stats); err_alloc_stats: @@ -1559,6 +1544,7 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) { size_t alloc_size; + u8 module, width; int i; int err; @@ -1568,6 +1554,13 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) return -ENOMEM; for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { + err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, + &width); + if (err) + goto err_port_module_info_get; + if (!width) + continue; + mlxsw_sp->port_to_module[i] = module; err = mlxsw_sp_port_create(mlxsw_sp, i); if (err) goto err_port_create; @@ -1575,6 +1568,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) return 0; err_port_create: +err_port_module_info_get: for (i--; i >= 1; i--) mlxsw_sp_port_remove(mlxsw_sp, i); kfree(mlxsw_sp->ports); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 6bf6daca8643..a7d86ac033f8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -123,6 +123,7 @@ struct mlxsw_sp { u32 ageing_time; struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; + u8 port_to_module[MLXSW_PORT_MAX_PORTS]; }; static inline struct mlxsw_sp_upper * -- cgit v1.2.3 From a133318cde2000a3264032ea3b561c9054613486 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 26 Feb 2016 17:32:30 +0100 Subject: mlxsw: spectrum: Mark unused ports using NULL When splitting and unsplitting we'll destroy usable ports on the fly, so mark them using a NULL pointer to indicate that their local port number is free and can be re-used. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 240881c8dec2..926019e86c36 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1518,6 +1518,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) if (!mlxsw_sp_port) return; + mlxsw_sp->ports[local_port] = NULL; devlink_port = &mlxsw_sp_port->devlink_port; devlink_port_type_clear(devlink_port); unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ -- cgit v1.2.3 From 18f1e70c413713f28629ffe6863a2c43248ff7a3 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Fri, 26 Feb 2016 17:32:31 +0100 Subject: mlxsw: spectrum: Introduce port splitting Allow a user to split or unsplit a port using the newly introduced devlink ops. Once split, the original netdev is destroyed and 2 or 4 others are created, according to user configuration. The new ports are like any other port, with the sole difference of supporting a lower maximum speed. When unsplit, the reverse process takes place. Signed-off-by: Ido Schimmel Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/port.h | 2 + drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 213 ++++++++++++++++++++++++- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 7 +- 3 files changed, 219 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index ae65b9940aed..f33b997f2b61 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -59,6 +59,8 @@ #define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS) +#define MLXSW_PORT_MODULE_MAX_WIDTH 4 + enum mlxsw_port_admin_status { MLXSW_PORT_ADMIN_STATUS_UP = 1, MLXSW_PORT_ADMIN_STATUS_DOWN = 2, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 926019e86c36..53487d3eb9f6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -321,6 +321,22 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, return 0; } +static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, + u8 module, u8 width, u8 lane) +{ + char pmlp_pl[MLXSW_REG_PMLP_LEN]; + int i; + + mlxsw_reg_pmlp_pack(pmlp_pl, local_port); + mlxsw_reg_pmlp_width_set(pmlp_pl, width); + for (i = 0; i < width; i++) { + mlxsw_reg_pmlp_module_set(pmlp_pl, i, module); + mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */ + } + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl); +} + static int mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port) { char pmlp_pl[MLXSW_REG_PMLP_LEN]; @@ -1284,6 +1300,18 @@ static u32 mlxsw_sp_to_ptys_speed(u32 speed) return ptys_proto; } +static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed) +{ + u32 ptys_proto = 0; + int i; + + for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) { + if (mlxsw_sp_port_link_mode[i].speed <= upper_speed) + ptys_proto |= mlxsw_sp_port_link_mode[i].mask; + } + return ptys_proto; +} + static int mlxsw_sp_port_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { @@ -1360,7 +1388,22 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { .set_settings = mlxsw_sp_port_set_settings, }; -static 
int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) +static int +mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width; + char ptys_pl[MLXSW_REG_PTYS_LEN]; + u32 eth_proto_admin; + + eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed); + mlxsw_reg_ptys_pack(ptys_pl, mlxsw_sp_port->local_port, + eth_proto_admin); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl); +} + +static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, + bool split, u8 module, u8 width) { struct devlink *devlink = priv_to_devlink(mlxsw_sp->core); struct mlxsw_sp_port *mlxsw_sp_port; @@ -1376,6 +1419,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; mlxsw_sp_port->local_port = local_port; + mlxsw_sp_port->split = split; bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); if (!mlxsw_sp_port->active_vlans) { @@ -1417,6 +1461,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) dev->hard_header_len += MLXSW_TXHDR_LEN; devlink_port = &mlxsw_sp_port->devlink_port; + if (mlxsw_sp_port->split) + devlink_port_split_set(devlink_port, module); err = devlink_port_register(devlink, devlink_port, local_port); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n", @@ -1438,6 +1484,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port) goto err_port_swid_set; } + err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n", + mlxsw_sp_port->local_port); + goto err_port_speed_by_width_set; + } + err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", @@ -1479,6 +1532,7 @@ err_register_netdev: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: +err_port_speed_by_width_set: err_port_swid_set: err_port_system_port_mapping_set: devlink_port_unregister(&mlxsw_sp_port->devlink_port); @@ -1494,6 +1548,28 @@ err_port_active_vlans_alloc: return err; } +static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, + bool split, u8 module, u8 width, u8 lane) +{ + int err; + + err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, + lane); + if (err) + return err; + + err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, + width); + if (err) + goto err_port_create; + + return 0; + +err_port_create: + mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); + return err; +} + static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) { struct net_device *dev = mlxsw_sp_port->dev; @@ -1562,7 +1638,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) if (!width) continue; mlxsw_sp->port_to_module[i] = module; - err = mlxsw_sp_port_create(mlxsw_sp, i); + err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); if (err) goto err_port_create; } @@ -1576,6 +1652,137 @@ err_port_module_info_get: return err; } +static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) +{ + u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX; + + return local_port - offset; +} + +static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct 
mlxsw_sp_port *mlxsw_sp_port; + u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; + u8 module, cur_width, base_port; + int i; + int err; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + + if (count != 2 && count != 4) { + netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, + &cur_width); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); + return err; + } + + if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { + netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); + return -EINVAL; + } + + /* Make sure we have enough slave (even) ports for the split. */ + if (count == 2) { + base_port = local_port; + if (mlxsw_sp->ports[base_port + 1]) { + netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + return -EINVAL; + } + } else { + base_port = mlxsw_sp_cluster_base_port_get(local_port); + if (mlxsw_sp->ports[base_port + 1] || + mlxsw_sp->ports[base_port + 3]) { + netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n"); + return -EINVAL; + } + } + + for (i = 0; i < count; i++) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + + for (i = 0; i < count; i++) { + err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, + module, width, i * width); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); + goto err_port_create; + } + } + + return 0; + +err_port_create: + for (i--; i >= 0; i--) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + for (i = 0; i < count / 2; i++) { + module = mlxsw_sp->port_to_module[base_port + i * 2]; + mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, + module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); + } + return err; +} + +static int mlxsw_sp_port_unsplit(void *priv, u8 local_port) +{ + struct mlxsw_sp *mlxsw_sp = priv; + struct mlxsw_sp_port *mlxsw_sp_port; + u8 module, cur_width, base_port; + unsigned int count; + int i; + int err; + + mlxsw_sp_port = mlxsw_sp->ports[local_port]; + if (!mlxsw_sp_port) { + dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", + local_port); + return -EINVAL; + } + + if (!mlxsw_sp_port->split) { + netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n"); + return -EINVAL; + } + + err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, + &cur_width); + if (err) { + netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); + return err; + } + count = cur_width == 1 ? 4 : 2; + + base_port = mlxsw_sp_cluster_base_port_get(local_port); + + /* Determine which ports to remove. 
*/ + if (count == 2 && local_port >= base_port + 2) + base_port = base_port + 2; + + for (i = 0; i < count; i++) + mlxsw_sp_port_remove(mlxsw_sp, base_port + i); + + for (i = 0; i < count / 2; i++) { + module = mlxsw_sp->port_to_module[base_port + i * 2]; + err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, + module, MLXSW_PORT_MODULE_MAX_WIDTH, + 0); + if (err) + dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); + } + + return 0; +} + static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg, char *pude_pl, void *priv) { @@ -1999,6 +2206,8 @@ static struct mlxsw_driver mlxsw_sp_driver = { .priv_size = sizeof(struct mlxsw_sp), .init = mlxsw_sp_init, .fini = mlxsw_sp_fini, + .port_split = mlxsw_sp_port_split, + .port_unsplit = mlxsw_sp_port_unsplit, .txhdr_construct = mlxsw_sp_txhdr_construct, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp_config_profile, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a7d86ac033f8..1b691d7e4a2a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -58,6 +58,10 @@ #define MLXSW_SP_MID_MAX 7000 +#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4 + +#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */ + struct mlxsw_sp_port; struct mlxsw_sp_upper { @@ -151,7 +155,8 @@ struct mlxsw_sp_port { learning_sync:1, uc_flood:1, bridged:1, - lagged:1; + lagged:1, + split:1; u16 pvid; u16 lag_id; struct { -- cgit v1.2.3 From 2fb5ef09de7c8ab30f7aa315e700a46f51ac4b98 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:01 -0500 Subject: net: dsa: mv88e6xxx: extract single VLAN retrieval Rename _mv88e6xxx_vlan_init in _mv88e6xxx_vtu_new, eventually called from a new _mv88e6xxx_vtu_get function, which abstracts the VTU GetNext VID-1 trick to retrieve a single entry. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 55 ++++++++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index d98dc635b00b..e9e99222399a 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1458,8 +1458,8 @@ loadpurge: return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE); } -static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, - struct mv88e6xxx_vtu_stu_entry *entry) +static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct mv88e6xxx_vtu_stu_entry vlan = { @@ -1509,6 +1509,35 @@ static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid, return 0; } +static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid, + struct mv88e6xxx_vtu_stu_entry *entry, bool creat) +{ + int err; + + if (!vid) + return -EINVAL; + + err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); + if (err) + return err; + + err = _mv88e6xxx_vtu_getnext(ds, entry); + if (err) + return err; + + if (entry->vid != vid || !entry->valid) { + if (!creat) + return -EOPNOTSUPP; + /* -ENOENT would've been more appropriate, but switchdev expects + * -EOPNOTSUPP to inform bridge about an eventual software VLAN. 
+ */ + + err = _mv88e6xxx_vtu_new(ds, vid, entry); + } + + return err; +} + static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, u16 vid_begin, u16 vid_end) { @@ -1593,20 +1622,10 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid, struct mv88e6xxx_vtu_stu_entry vlan; int err; - err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); - if (err) - return err; - - err = _mv88e6xxx_vtu_getnext(ds, &vlan); + err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true); if (err) return err; - if (vlan.vid != vid || !vlan.valid) { - err = _mv88e6xxx_vlan_init(ds, vid, &vlan); - if (err) - return err; - } - vlan.data[port] = untagged ? GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED : GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED; @@ -1647,16 +1666,12 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid) struct mv88e6xxx_vtu_stu_entry vlan; int i, err; - err = _mv88e6xxx_vtu_vid_write(ds, vid - 1); - if (err) - return err; - - err = _mv88e6xxx_vtu_getnext(ds, &vlan); + err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false); if (err) return err; - if (vlan.vid != vid || !vlan.valid || - vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) + /* Tell switchdev if this VLAN is handled in software */ + if (vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) return -EOPNOTSUPP; vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; -- cgit v1.2.3 From 74b6ba0d76acd3ce5edac54aa73c4eb7e3a93859 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:02 -0500 Subject: net: dsa: mv88e6xxx: extract single FDB dump Move out the code which dumps a single FDB to its own function. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 79 ++++++++++++++++++++++++++------------------- 1 file changed, 46 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index e9e99222399a..63295169f4e7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1895,6 +1895,47 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, return 0; } +static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid, + int port, + struct switchdev_obj_port_fdb *fdb, + int (*cb)(struct switchdev_obj *obj)) +{ + struct mv88e6xxx_atu_entry addr = { + .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, + }; + int err; + + err = _mv88e6xxx_atu_mac_write(ds, addr.mac); + if (err) + return err; + + do { + err = _mv88e6xxx_atu_getnext(ds, fid, &addr); + if (err) + break; + + if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) + break; + + if (!addr.trunk && addr.portv_trunkid & BIT(port)) { + bool is_static = addr.state == + (is_multicast_ether_addr(addr.mac) ? + GLOBAL_ATU_DATA_STATE_MC_STATIC : + GLOBAL_ATU_DATA_STATE_UC_STATIC); + + fdb->vid = vid; + ether_addr_copy(fdb->addr, addr.mac); + fdb->ndm_state = is_static ? 
NUD_NOARP : NUD_REACHABLE; + + err = cb(&fdb->obj); + if (err) + break; + } + } while (!is_broadcast_ether_addr(addr.mac)); + + return err; +} + int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, struct switchdev_obj_port_fdb *fdb, int (*cb)(struct switchdev_obj *obj)) @@ -1907,51 +1948,23 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, mutex_lock(&ps->smi_mutex); + /* Dump VLANs' Filtering Information Databases */ err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid); if (err) goto unlock; do { - struct mv88e6xxx_atu_entry addr = { - .mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, - }; - err = _mv88e6xxx_vtu_getnext(ds, &vlan); if (err) - goto unlock; + break; if (!vlan.valid) break; - err = _mv88e6xxx_atu_mac_write(ds, addr.mac); + err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port, + fdb, cb); if (err) - goto unlock; - - do { - err = _mv88e6xxx_atu_getnext(ds, vlan.fid, &addr); - if (err) - goto unlock; - - if (addr.state == GLOBAL_ATU_DATA_STATE_UNUSED) - break; - - if (!addr.trunk && addr.portv_trunkid & BIT(port)) { - bool is_static = addr.state == - (is_multicast_ether_addr(addr.mac) ? - GLOBAL_ATU_DATA_STATE_MC_STATIC : - GLOBAL_ATU_DATA_STATE_UC_STATIC); - - fdb->vid = vlan.vid; - ether_addr_copy(fdb->addr, addr.mac); - fdb->ndm_state = is_static ? NUD_NOARP : - NUD_REACHABLE; - - err = cb(&fdb->obj); - if (err) - goto unlock; - } - } while (!is_broadcast_ether_addr(addr.mac)); - + break; } while (vlan.vid < GLOBAL_VTU_VID_MASK); unlock: -- cgit v1.2.3 From 3285f9e8695b26b1e15a58f21aff21087a7e7555 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:03 -0500 Subject: net: dsa: mv88e6xxx: assign dynamic FDB to VLANs Add a _mv88e6xxx_fid_new function which gives and flushes the lowest FID available. Call it when preparing a new VTU entry. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 56 +++++++++++++++++++++++++++++++++++++-------- drivers/net/dsa/mv88e6xxx.h | 2 ++ 2 files changed, 49 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 63295169f4e7..b4b2f05134ba 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1458,6 +1458,41 @@ loadpurge: return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE); } +static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) +{ + DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); + + /* Set every FID bit used by the VLAN entries */ + err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK); + if (err) + return err; + + do { + err = _mv88e6xxx_vtu_getnext(ds, &vlan); + if (err) + return err; + + if (!vlan.valid) + break; + + set_bit(vlan.fid, fid_bitmap); + } while (vlan.vid < GLOBAL_VTU_VID_MASK); + + /* The reset value 0x000 is used to indicate that multiple address + * databases are not needed. Return the next positive available. 
+ */ + *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); + if (unlikely(*fid == MV88E6XXX_N_FID)) + return -ENOSPC; + + /* Clear the database */ + return _mv88e6xxx_atu_flush(ds, *fid, true); +} + static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, struct mv88e6xxx_vtu_stu_entry *entry) { @@ -1465,9 +1500,12 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, struct mv88e6xxx_vtu_stu_entry vlan = { .valid = true, .vid = vid, - .fid = vid, /* We use one FID per VLAN */ }; - int i; + int i, err; + + err = _mv88e6xxx_fid_new(ds, &vlan.fid); + if (err) + return err; /* exclude all ports except the CPU and DSA ports */ for (i = 0; i < ps->num_ports; ++i) @@ -1478,7 +1516,6 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { struct mv88e6xxx_vtu_stu_entry vstp; - int err; /* Adding a VTU entry requires a valid STU entry. As VSTP is not * implemented, only one STU entry is needed to cover all VTU @@ -1498,11 +1535,6 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid, if (err) return err; } - - /* Clear all MAC addresses from the new database */ - err = _mv88e6xxx_atu_flush(ds, vlan.fid, true); - if (err) - return err; } *entry = vlan; @@ -1789,8 +1821,14 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, u8 state) { struct mv88e6xxx_atu_entry entry = { 0 }; + struct mv88e6xxx_vtu_stu_entry vlan; + int err; + + err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false); + if (err) + return err; - entry.fid = vid; /* We use one FID per VLAN */ + entry.fid = vlan.fid; entry.state = state; ether_addr_copy(entry.mac, addr); if (state != GLOBAL_ATU_DATA_STATE_UNUSED) { diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 6a30bda63a2f..9df331e85bf8 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -355,6 +355,8 @@ #define GLOBAL2_QOS_WEIGHT 0x1c #define GLOBAL2_MISC 0x1d +#define MV88E6XXX_N_FID 4096 + struct mv88e6xxx_switch_id { u16 id; char *name; -- cgit v1.2.3 From 2db9ce1fd9a34ea560ff120bf763007ddf99c7bb Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:04 -0500 Subject: net: dsa: mv88e6xxx: assign default FDB to ports Restore per-port FDB. Assign them on setup, allow adding and deleting addresses into them, and dump them. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 96 +++++++++++++++++++++++++++++++++++++++++---- drivers/net/dsa/mv88e6xxx.h | 2 + 2 files changed, 91 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index b4b2f05134ba..0f064889ea08 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1458,14 +1458,82 @@ loadpurge: return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE); } +static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, + u16 *old) +{ + u16 fid; + int ret; + + /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); + if (ret < 0) + return ret; + + fid = (ret & PORT_BASE_VLAN_FID_3_0_MASK) >> 12; + + if (new) { + ret &= ~PORT_BASE_VLAN_FID_3_0_MASK; + ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK; + + ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, + ret); + if (ret < 0) + return ret; + } + + /* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */ + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1); + if (ret < 0) + return ret; + + fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4; + + if (new) { + ret &= ~PORT_CONTROL_1_FID_11_4_MASK; + ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK; + + ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, + ret); + if (ret < 0) + return ret; + + netdev_dbg(ds->ports[port], "FID %d (was %d)\n", *new, fid); + } + + if (old) + *old = fid; + + return 0; +} + +static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid) +{ + return _mv88e6xxx_port_fid(ds, port, NULL, fid); +} + +static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid) +{ + return _mv88e6xxx_port_fid(ds, port, &fid, NULL); +} + static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) { + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID); struct mv88e6xxx_vtu_stu_entry vlan; - int err; + int i, err; bitmap_zero(fid_bitmap, MV88E6XXX_N_FID); + /* Set every FID bit used by the (un)bridged ports */ + for (i = 0; i < ps->num_ports; ++i) { + err = _mv88e6xxx_port_fid_get(ds, i, fid); + if (err) + return err; + + set_bit(*fid, fid_bitmap); + } + /* Set every FID bit used by the VLAN entries */ err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK); if (err) @@ -1824,7 +1892,11 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, struct mv88e6xxx_vtu_stu_entry vlan; int err; - err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false); + /* Null VLAN ID corresponds to the port private database */ + if (vid == 0) + err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid); + else + err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false); if (err) return err; @@ -1843,10 +1915,6 @@ int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_fdb *fdb, struct switchdev_trans *trans) { - /* We don't use per-port FDB */ - if (fdb->vid == 0) - return -EOPNOTSUPP; - /* We don't need any dynamic resource from the kernel (yet), * so skip the prepare phase. 
*/ @@ -1982,10 +2050,20 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port, struct mv88e6xxx_vtu_stu_entry vlan = { .vid = GLOBAL_VTU_VID_MASK, /* all ones */ }; + u16 fid; int err; mutex_lock(&ps->smi_mutex); + /* Dump port's default Filtering Information Database (VLAN ID 0) */ + err = _mv88e6xxx_port_fid_get(ds, port, &fid); + if (err) + goto unlock; + + err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb); + if (err) + goto unlock; + /* Dump VLANs' Filtering Information Databases */ err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid); if (err) @@ -2286,9 +2364,13 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) if (ret) goto abort; - /* Port based VLAN map: do not give each port its own address + /* Port based VLAN map: give each port its own address * database, and allow every port to egress frames on all other ports. */ + ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); + if (ret) + goto abort; + reg = BIT(ps->num_ports) - 1; /* all ports */ reg &= ~BIT(port); /* except itself */ ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 9df331e85bf8..85a416620f7c 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -133,7 +133,9 @@ #define PORT_CONTROL_STATE_LEARNING 0x02 #define PORT_CONTROL_STATE_FORWARDING 0x03 #define PORT_CONTROL_1 0x05 +#define PORT_CONTROL_1_FID_11_4_MASK (0xff << 0) #define PORT_BASE_VLAN 0x06 +#define PORT_BASE_VLAN_FID_3_0_MASK (0xf << 12) #define PORT_DEFAULT_VLAN 0x07 #define PORT_DEFAULT_VLAN_MASK 0xfff #define PORT_CONTROL_2 0x08 -- cgit v1.2.3 From 466dfa0770220cebad2e58e1905489328fc9daf7 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:05 -0500 Subject: net: dsa: mv88e6xxx: assign dynamic FDB to bridges Give a new bridge a fresh FDB, assign it to its members, and restore a fresh FDB to a port leaving a bridge. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 41 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 39 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 0f064889ea08..0f169119cbb8 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2093,19 +2093,56 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u16 fid; + int i, err; + + mutex_lock(&ps->smi_mutex); + + /* Get or create the bridge FID and assign it to the port */ + for (i = 0; i < ps->num_ports; ++i) + if (ps->ports[i].bridge_dev == bridge) + break; + + if (i < ps->num_ports) + err = _mv88e6xxx_port_fid_get(ds, i, &fid); + else + err = _mv88e6xxx_fid_new(ds, &fid); + if (err) + goto unlock; + + err = _mv88e6xxx_port_fid_set(ds, port, fid); + if (err) + goto unlock; ps->ports[port].bridge_dev = bridge; +unlock: + mutex_unlock(&ps->smi_mutex); - return 0; + return err; } int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u16 fid; + int err; + + mutex_lock(&ps->smi_mutex); + + /* Give the port a fresh Filtering Information Database */ + err = _mv88e6xxx_fid_new(ds, &fid); + if (err) + goto unlock; + + err = _mv88e6xxx_port_fid_set(ds, port, fid); + if (err) + goto unlock; ps->ports[port].bridge_dev = NULL; +unlock: + mutex_unlock(&ps->smi_mutex); - return 0; + return err; } static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port) -- cgit v1.2.3 From b7666efe46caef008eb92f11392f6f839b35d824 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:06 -0500 Subject: net: dsa: mv88e6xxx: restore VLANTable map control The In Chip Port Based VLAN Table contains bits used to restrict which output ports this input port can send frames to. With the VLAN filtering enabled, these tables work in conjunction with the VLAN Table Unit to allow egressing frames. In order to remove the current dependency to BRIDGE_VLAN_FILTERING for basic hardware bridging to work, it is necessary to restore a fine control of each port's VLANTable, on setup and when a port joins or leaves a bridge. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 54 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 47 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 0f169119cbb8..7f3036b15f4d 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1087,12 +1087,32 @@ abort: return ret; } -static int _mv88e6xxx_port_vlan_map_set(struct dsa_switch *ds, int port, - u16 output_ports) +static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct net_device *bridge = ps->ports[port].bridge_dev; const u16 mask = (1 << ps->num_ports) - 1; + u16 output_ports = 0; int reg; + int i; + + /* allow CPU port or DSA link(s) to send frames to every port */ + if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) { + output_ports = mask; + } else { + for (i = 0; i < ps->num_ports; ++i) { + /* allow sending frames to every group member */ + if (bridge && ps->ports[i].bridge_dev == bridge) + output_ports |= BIT(i); + + /* allow sending frames to CPU port and DSA link(s) */ + if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) + output_ports |= BIT(i); + } + } + + /* prevent frames from going back out of the port they came in on */ + output_ports &= ~BIT(port); reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); if (reg < 0) @@ -2114,7 +2134,17 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, if (err) goto unlock; + /* Assign the bridge and remap each port's VLANTable */ ps->ports[port].bridge_dev = bridge; + + for (i = 0; i < ps->num_ports; ++i) { + if (ps->ports[i].bridge_dev == bridge) { + err = _mv88e6xxx_port_based_vlan_map(ds, i); + if (err) + break; + } + } + unlock: mutex_unlock(&ps->smi_mutex); @@ -2124,8 +2154,9 @@ unlock: int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + struct net_device *bridge = ps->ports[port].bridge_dev; u16 fid; - int err; + int i, err; mutex_lock(&ps->smi_mutex); @@ -2138,7 +2169,17 @@ int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) if (err) goto unlock; + /* Unassign the bridge and remap each port's VLANTable */ ps->ports[port].bridge_dev = NULL; + + for (i = 0; i < ps->num_ports; ++i) { + if (i == port || ps->ports[i].bridge_dev == bridge) { + err = _mv88e6xxx_port_based_vlan_map(ds, i); + if (err) + break; + } + } + unlock: mutex_unlock(&ps->smi_mutex); @@ -2402,15 +2443,14 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) goto abort; /* Port based VLAN map: give each port its own address - * database, and allow every port to egress frames on all other ports. + * database, and allow bidirectional communication between the + * CPU and DSA port(s), and the other ports. */ ret = _mv88e6xxx_port_fid_set(ds, port, port + 1); if (ret) goto abort; - reg = BIT(ps->num_ports) - 1; /* all ports */ - reg &= ~BIT(port); /* except itself */ - ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg); + ret = _mv88e6xxx_port_based_vlan_map(ds, port); if (ret) goto abort; -- cgit v1.2.3 From 46fbe5e5af772e0533ba3ab3d9977a5b097b88b5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:07 -0500 Subject: net: dsa: mv88e6xxx: remove reserved VLANs Now that ports isolation is correctly configured when joining or leaving a bridge, there is no need to rely on reserved VLANs to isolate unbridged ports anymore. Thus remove them, and disable 802.1Q on setup. 
This restores the expected behavior of hardware bridging for systems without 802.1Q or VLAN filtering enabled. Signed-off-by: Vivien Didelot Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 33 +++------------------------------ 1 file changed, 3 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 7f3036b15f4d..27a19dccfd65 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1718,10 +1718,6 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, { int err; - /* We reserve a few VLANs to isolate unbridged ports */ - if (vlan->vid_end >= 4000) - return -EOPNOTSUPP; - /* If the requested port doesn't belong to the same bridge as the VLAN * members, do not support it (yet) and fallback to software VLAN. */ @@ -1819,7 +1815,6 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port; u16 pvid, vid; int err = 0; @@ -1835,8 +1830,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, goto unlock; if (vid == pvid) { - /* restore reserved VLAN ID */ - err = _mv88e6xxx_port_pvid_set(ds, port, defpvid); + err = _mv88e6xxx_port_pvid_set(ds, port, 0); if (err) goto unlock; } @@ -2186,20 +2180,6 @@ unlock: return err; } -static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; - int err; - - mutex_lock(&ps->smi_mutex); - err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true); - if (!err) - err = _mv88e6xxx_port_pvid_set(ds, port, pvid); - mutex_unlock(&ps->smi_mutex); - return err; -} - static void mv88e6xxx_bridge_work(struct work_struct *work) { struct mv88e6xxx_priv_state *ps; @@ -2320,7 +2300,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) } /* Port Control 2: don't force a good FCS, set the maximum frame size to - * 10240 bytes, enable secure 802.1q tags, don't discard tagged or + * 10240 bytes, disable 802.1q tags checking, don't discard tagged or * untagged frames on this port, do a destination address lookup on all * received packets as usual, disable ARP mirroring and don't send a * copy of all transmitted/received frames on this port to the CPU. @@ -2345,7 +2325,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg |= PORT_CONTROL_2_FORWARD_UNKNOWN; } - reg |= PORT_CONTROL_2_8021Q_SECURE; + reg |= PORT_CONTROL_2_8021Q_DISABLED; if (reg) { ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), @@ -2474,13 +2454,6 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds) ret = mv88e6xxx_setup_port(ds, i); if (ret < 0) return ret; - - if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) - continue; - - ret = mv88e6xxx_setup_port_default_vlan(ds, i); - if (ret < 0) - return ret; } return 0; } -- cgit v1.2.3 From 214cdb998739428b09d80b4b152faa7d1e6ad156 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 26 Feb 2016 13:16:08 -0500 Subject: net: dsa: mv88e6xxx: support VLAN filtering Implement port_vlan_filtering in the driver to toggle the related port 802.1Q mode between DISABLED and SECURE, on user request. Signed-off-by: Vivien Didelot Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6171.c | 1 + drivers/net/dsa/mv88e6352.c | 1 + drivers/net/dsa/mv88e6xxx.c | 39 +++++++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6xxx.h | 2 ++ 4 files changed, 43 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index dd1ebaf48077..d72ccbdf53ec 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -106,6 +106,7 @@ struct dsa_switch_driver mv88e6171_switch_driver = { .port_join_bridge = mv88e6xxx_port_bridge_join, .port_leave_bridge = mv88e6xxx_port_bridge_leave, .port_stp_update = mv88e6xxx_port_stp_update, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index bbca36ac4f77..a41fa5043d77 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -327,6 +327,7 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .port_join_bridge = mv88e6xxx_port_bridge_join, .port_leave_bridge = mv88e6xxx_port_bridge_leave, .port_stp_update = mv88e6xxx_port_stp_update, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, .port_vlan_add = mv88e6xxx_port_vlan_add, .port_vlan_del = mv88e6xxx_port_vlan_del, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 27a19dccfd65..d11c9d58cf10 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1712,6 +1712,45 @@ unlock: return err; } +static const char * const mv88e6xxx_port_8021q_mode_names[] = { + [PORT_CONTROL_2_8021Q_DISABLED] = "Disabled", + [PORT_CONTROL_2_8021Q_FALLBACK] = "Fallback", + [PORT_CONTROL_2_8021Q_CHECK] = "Check", + [PORT_CONTROL_2_8021Q_SECURE] = "Secure", +}; + +int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + u16 old, new = vlan_filtering ? 
PORT_CONTROL_2_8021Q_SECURE : + PORT_CONTROL_2_8021Q_DISABLED; + int ret; + + mutex_lock(&ps->smi_mutex); + + ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2); + if (ret < 0) + goto unlock; + + old = ret & PORT_CONTROL_2_8021Q_MASK; + + ret &= ~PORT_CONTROL_2_8021Q_MASK; + ret |= new & PORT_CONTROL_2_8021Q_MASK; + + ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, ret); + if (ret < 0) + goto unlock; + + netdev_dbg(ds->ports[port], "802.1Q Mode: %s (was %s)\n", + mv88e6xxx_port_8021q_mode_names[new], + mv88e6xxx_port_8021q_mode_names[old]); +unlock: + mutex_unlock(&ps->smi_mutex); + + return ret; +} + int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 85a416620f7c..d7b088dd8e16 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -490,6 +490,8 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge); int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port); int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); +int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering); int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct switchdev_trans *trans); -- cgit v1.2.3 From daa21560a29673f63263e907045f513a8d66dc33 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 Mar 2016 00:13:32 +0200 Subject: net/mlx5e: Replace async events spinlock with synchronize_irq() We only need to flush the irq handler to make sure it does not queue a work into the global work queue after we start to flush it. So using synchronize_irq() is more appropriate than a spin lock. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 1 - drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 24 ++++++++-------------- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 5 +++++ .../net/ethernet/mellanox/mlx5/core/mlx5_core.h | 1 + 4 files changed, 14 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 1dca3dcf90f5..451198444ace 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -555,7 +555,6 @@ struct mlx5e_priv { struct mlx5e_vxlan_db vxlan; struct mlx5e_params params; - spinlock_t async_events_spinlock; /* sync hw events */ struct work_struct update_carrier_work; struct work_struct set_rx_mode_work; struct delayed_work update_stats_work; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 0d45f35aee72..38944b84c580 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -275,9 +275,14 @@ static void mlx5e_update_stats_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -static void __mlx5e_async_event(struct mlx5e_priv *priv, - enum mlx5_dev_event event) +static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, + enum mlx5_dev_event event, unsigned long param) { + struct mlx5e_priv *priv = vpriv; + + if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) + return; + switch (event) { case MLX5_DEV_EVENT_PORT_UP: case MLX5_DEV_EVENT_PORT_DOWN: @@ -289,17 +294,6 @@ static void __mlx5e_async_event(struct mlx5e_priv *priv, } } -static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv, - enum mlx5_dev_event event, unsigned long param) -{ - struct mlx5e_priv *priv = vpriv; - - spin_lock(&priv->async_events_spinlock); - if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state)) - __mlx5e_async_event(priv, event); - spin_unlock(&priv->async_events_spinlock); -} - static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); @@ -307,9 +301,8 @@ static void mlx5e_enable_async_events(struct mlx5e_priv *priv) static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { - spin_lock_irq(&priv->async_events_spinlock); clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state); - spin_unlock_irq(&priv->async_events_spinlock); + synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC)); } #define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)) @@ -2290,7 +2283,6 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, mlx5e_ets_init(priv); #endif - spin_lock_init(&priv->async_events_spinlock); mutex_init(&priv->state_lock); INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 647a3ca2c2a9..18fccec72c5d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -442,6 +442,11 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) } EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq); +u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx) +{ + return dev->priv.msix_arr[MLX5_EQ_VEC_ASYNC].vector; +} + int mlx5_eq_init(struct mlx5_core_dev *dev) { int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h 
b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 0336847ec9a1..0b0b226c789e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -99,6 +99,7 @@ int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev); +u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx); void mlx5e_init(void); void mlx5e_cleanup(void); -- cgit v1.2.3 From ce89ef36d2544b4180fdb9ce9e2918196b0082f8 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 Mar 2016 00:13:33 +0200 Subject: net/mlx5e: Placement changed for carrier state updates More proper to declare carrier state UP only after the channels are ready for traffic. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 38944b84c580..013be09a21be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1451,8 +1451,8 @@ int mlx5e_open_locked(struct net_device *netdev) goto err_close_channels; } - mlx5e_update_carrier(priv); mlx5e_redirect_rqts(priv); + mlx5e_update_carrier(priv); mlx5e_timestamp_init(priv); schedule_delayed_work(&priv->update_stats_work, 0); @@ -1491,8 +1491,8 @@ int mlx5e_close_locked(struct net_device *netdev) clear_bit(MLX5E_STATE_OPENED, &priv->state); mlx5e_timestamp_cleanup(priv); - mlx5e_redirect_rqts(priv); netif_carrier_off(priv->netdev); + mlx5e_redirect_rqts(priv); mlx5e_close_channels(priv); return 0; -- cgit v1.2.3 From 3b6195240c52b38a2d7ecc9cc8407596d9f24663 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 Mar 2016 00:13:34 +0200 Subject: net/mlx5e: Changed naming convention of tx queues in ethtool stats Instead of the pair (channel, tc), we now use a single number that goes over all tx queues of a TC, for all TCs. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index e9760f895744..577b4b1e4de3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -211,13 +211,14 @@ static void mlx5e_get_strings(struct net_device *dev, sprintf(data + (idx++) * ETH_GSTRING_LEN, "rx%d_%s", i, rq_stats_strings[j]); - for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->params.num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) + for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) sprintf(data + - (idx++) * ETH_GSTRING_LEN, - "tx%d_%d_%s", i, tc, - sq_stats_strings[j]); + (idx++) * ETH_GSTRING_LEN, + "tx%d_%s", + priv->channeltc_to_txq_map[i][tc], + sq_stats_strings[j]); break; } } @@ -249,8 +250,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev, &priv->state) ? 
0 : ((u64 *)&priv->channel[i]->rq.stats)[j]; - for (i = 0; i < priv->params.num_channels; i++) - for (tc = 0; tc < priv->params.num_tc; tc++) + for (tc = 0; tc < priv->params.num_tc; tc++) + for (i = 0; i < priv->params.num_channels; i++) for (j = 0; j < NUM_SQ_STATS; j++) data[idx++] = !test_bit(MLX5E_STATE_OPENED, &priv->state) ? 0 : -- cgit v1.2.3 From c89fb18b657b07d6c98810182057f844e71d3eee Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 Mar 2016 00:13:35 +0200 Subject: net/mlx5e: Move common case counters within sq_stats struct For data cache locality considerations, we moved the nop and csum_offload_inner within sq_stats struct as they are more commonly accessed in xmit path. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 451198444ace..b289660568cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -260,26 +260,28 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = { "tso_bytes", "tso_inner_packets", "tso_inner_bytes", - "csum_offload_none", "csum_offload_inner", + "nop", + "csum_offload_none", "stopped", "wake", "dropped", - "nop" }; struct mlx5e_sq_stats { + /* commonly accessed in data path */ u64 packets; u64 tso_packets; u64 tso_bytes; u64 tso_inner_packets; u64 tso_inner_bytes; - u64 csum_offload_none; u64 csum_offload_inner; + u64 nop; + /* less likely accessed in data path */ + u64 csum_offload_none; u64 stopped; u64 wake; u64 dropped; - u64 nop; #define NUM_SQ_STATS 11 }; -- cgit v1.2.3 From 556dd1b9c3134501aba707fab66cf4d739e40061 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 2 Mar 2016 00:13:36 +0200 Subject: net/mlx5e: Set drop RQ's necessary parameters only By its role, there is no need to set all the other parameters for the drop RQ. Signed-off-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 013be09a21be..30fd9716d85f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1064,6 +1064,15 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv, param->wq.linear = 1; } +static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param) +{ + void *rqc = param->rqc; + void *wq = MLX5_ADDR_OF(rqc, rqc, wq); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST); + MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe))); +} + static void mlx5e_build_sq_param(struct mlx5e_priv *priv, struct mlx5e_sq_param *param) { @@ -1574,8 +1583,7 @@ static int mlx5e_open_drop_rq(struct mlx5e_priv *priv) memset(&cq_param, 0, sizeof(cq_param)); memset(&rq_param, 0, sizeof(rq_param)); - mlx5e_build_rx_cq_param(priv, &cq_param); - mlx5e_build_rq_param(priv, &rq_param); + mlx5e_build_drop_rq_param(&rq_param); err = mlx5e_create_drop_cq(priv, cq, &cq_param); if (err) -- cgit v1.2.3 From 7524a5d88b94afef8397a79f1e664af5b7052c22 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 2 Mar 2016 00:13:37 +0200 Subject: net/mlx5e: Don't try to modify CQ moderation if it is not supported If CQ moderation is not supported by the device, print a warning on netdevice load, and return error when trying to modify/query cq moderation via ethtool. Fixes: f62b8bb8f2d3 ('net/mlx5: Extend mlx5_core to support ConnectX-4 Ethernet functionality') Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6 ++++++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++------ 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 577b4b1e4de3..a1b3bb4358b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -400,6 +400,9 @@ static int mlx5e_get_coalesce(struct net_device *netdev, { struct mlx5e_priv *priv = netdev_priv(netdev); + if (!MLX5_CAP_GEN(priv->mdev, cq_moderation)) + return -ENOTSUPP; + coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec; coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts; coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec; @@ -417,6 +420,9 @@ static int mlx5e_set_coalesce(struct net_device *netdev, int tc; int i; + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + return -ENOTSUPP; + priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 30fd9716d85f..b20a35bd1d4f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -870,12 +870,10 @@ static int mlx5e_open_cq(struct mlx5e_channel *c, if (err) goto err_destroy_cq; - err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq, - moderation_usecs, - moderation_frames); - if (err) - goto err_destroy_cq; - + if (MLX5_CAP_GEN(mdev, cq_moderation)) + 
mlx5_core_modify_cq_moderation(mdev, &cq->mcq, + moderation_usecs, + moderation_frames); return 0; err_destroy_cq: @@ -2218,6 +2216,8 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) } if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable)) mlx5_core_warn(mdev, "Self loop back prevention is not supported\n"); + if (!MLX5_CAP_GEN(mdev, cq_moderation)) + mlx5_core_warn(mdev, "CQ modiration is not supported\n"); return 0; } -- cgit v1.2.3 From 2fcb92fbd04eef26dfe7e67839da6262d83d6b65 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Wed, 2 Mar 2016 00:13:38 +0200 Subject: net/mlx5e: Don't modify CQ before it was created Calling mlx5e_set_coalesce while the interface is down will result in modifying CQs that don't exist. Fixes: f62b8bb8f2d3 ('net/mlx5: Extend mlx5_core to support ConnectX-4 Ethernet functionality') Signed-off-by: Gal Pressman Signed-off-by: Saeed Mahameed Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index a1b3bb4358b5..0959656404b3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -423,11 +423,15 @@ static int mlx5e_set_coalesce(struct net_device *netdev, if (!MLX5_CAP_GEN(mdev, cq_moderation)) return -ENOTSUPP; + mutex_lock(&priv->state_lock); priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs; priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames; priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs; priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) + goto out; + for (i = 0; i < priv->params.num_channels; ++i) { c = priv->channel[i]; @@ -443,6 +447,8 @@ static int mlx5e_set_coalesce(struct net_device *netdev, coal->rx_max_coalesced_frames); } +out: + mutex_unlock(&priv->state_lock); return 0; } -- cgit v1.2.3 From 0ba422410bbf7081c3c7d7b2dcc10e9eb5cb46f7 Mon Sep 17 00:00:00 2001 From: Moshe Lazer Date: Wed, 2 Mar 2016 00:13:40 +0200 Subject: net/mlx5: Fix global UAR mapping Avoid double mapping of io mapped memory, Device page may be mapped to non-cached(NC) or to write-combining(WC). The code before this fix tries to map it both to WC and NC contrary to what stated in Intel's software developer manual. Here we remove the global WC mapping of all UARS "dev->priv.bf_mapping", since UAR mapping should be decided per UAR (e.g we want different mappings for EQs, CQs vs QPs). Caller will now have to choose whether to map via write-combining API or not. mlx5e SQs will choose write-combining in order to perform BlueFlame writes. Fixes: 88a85f99e51f ('TX latency optimization to save DMA reads') Signed-off-by: Moshe Lazer Reviewed-by: Achiad Shochat Signed-off-by: Saeed Mahameed Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 16 +++++-------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12 ++++++---- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/main.c | 28 +--------------------- drivers/net/ethernet/mellanox/mlx5/core/uar.c | 29 ++++++++++++++--------- include/linux/mlx5/driver.h | 5 ++-- 6 files changed, 36 insertions(+), 56 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b289660568cf..9c0e80e64b43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -388,6 +388,7 @@ struct mlx5e_sq_dma { enum { MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, + MLX5E_SQ_STATE_BF_ENABLE, }; struct mlx5e_sq { @@ -416,7 +417,6 @@ struct mlx5e_sq { struct mlx5_wq_cyc wq; u32 dma_fifo_mask; void __iomem *uar_map; - void __iomem *uar_bf_map; struct netdev_queue *txq; u32 sqn; u16 bf_buf_size; @@ -664,16 +664,12 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, * doorbell */ wmb(); - - if (bf_sz) { - __iowrite64_copy(sq->uar_bf_map + ofst, &wqe->ctrl, bf_sz); - - /* flush the write-combining mapped buffer */ - wmb(); - - } else { + if (bf_sz) + __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz); + else mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL); - } + /* flush the write-combining mapped buffer */ + wmb(); sq->bf_offset ^= sq->bf_buf_size; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b20a35bd1d4f..5063c0e0f8ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -548,7 +548,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, int txq_ix; int err; - err = mlx5_alloc_map_uar(mdev, &sq->uar); + err = mlx5_alloc_map_uar(mdev, &sq->uar, true); if (err) return err; @@ -560,8 +560,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c, goto err_unmap_free_uar; sq->wq.db = &sq->wq.db[MLX5_SND_DBR]; - sq->uar_map = sq->uar.map; - sq->uar_bf_map = sq->uar.bf_map; + if (sq->uar.bf_map) { + set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state); + sq->uar_map = sq->uar.bf_map; + } else { + sq->uar_map = sq->uar.map; + } sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2; sq->max_inline = param->max_inline; @@ -2418,7 +2422,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) priv = netdev_priv(netdev); - err = mlx5_alloc_map_uar(mdev, &priv->cq_uar); + err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false); if (err) { mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err); goto err_free_netdev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index a05c070cbc2f..c34f4f3e9537 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -303,7 +303,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) { int bf_sz = 0; - if (bf && sq->uar_bf_map) + if (bf && test_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state)) bf_sz = wi->num_wqebbs << 3; cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 1545a944c309..8b7133de498e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -767,22 +767,6 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) return -ENOTSUPP; } -static int map_bf_area(struct mlx5_core_dev *dev) -{ - resource_size_t bf_start = pci_resource_start(dev->pdev, 0); - resource_size_t bf_len = pci_resource_len(dev->pdev, 0); - - dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len); - - return dev->priv.bf_mapping ? 0 : -ENOMEM; -} - -static void unmap_bf_area(struct mlx5_core_dev *dev) -{ - if (dev->priv.bf_mapping) - io_mapping_free(dev->priv.bf_mapping); -} - static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { struct mlx5_device_context *dev_ctx; @@ -1103,14 +1087,9 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) goto err_stop_eqs; } - if (map_bf_area(dev)) - dev_err(&pdev->dev, "Failed to map blue flame area\n"); - err = mlx5_irq_set_affinity_hints(dev); - if (err) { + if (err) dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); - goto err_unmap_bf_area; - } MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock); @@ -1169,10 +1148,6 @@ err_fs: mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_irq_clear_affinity_hints(dev); - -err_unmap_bf_area: - unmap_bf_area(dev); - free_comp_eqs(dev); err_stop_eqs: @@ -1242,7 +1217,6 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv) mlx5_cleanup_qp_table(dev); mlx5_cleanup_cq_table(dev); mlx5_irq_clear_affinity_hints(dev); - unmap_bf_area(dev); free_comp_eqs(dev); mlx5_stop_eqs(dev); mlx5_free_uuars(dev, &priv->uuari); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index eb05c845ece9..8ba080e441a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c @@ -226,7 +226,8 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) return 0; } -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, + bool map_wc) { phys_addr_t pfn; phys_addr_t uar_bar_start; @@ -240,20 +241,26 @@ int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) uar_bar_start = pci_resource_start(mdev->pdev, 0); pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index; - uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); - if (!uar->map) { - mlx5_core_warn(mdev, "ioremap() failed, %d\n", err); - err = -ENOMEM; - goto err_free_uar; - } - if (mdev->priv.bf_mapping) - uar->bf_map = io_mapping_map_wc(mdev->priv.bf_mapping, - uar->index << PAGE_SHIFT); + if (map_wc) { + uar->bf_map = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->bf_map) { + mlx5_core_warn(mdev, "ioremap_wc() failed\n"); + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) + goto err_free_uar; + } + } else { + uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); + if (!uar->map) + goto err_free_uar; + } return 0; err_free_uar: + mlx5_core_warn(mdev, "ioremap() failed\n"); + err = -ENOMEM; mlx5_cmd_free_uar(mdev, uar->index); return err; @@ -262,8 +269,8 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar) { - io_mapping_unmap(uar->bf_map); iounmap(uar->map); + iounmap(uar->bf_map); mlx5_cmd_free_uar(mdev, uar->index); } EXPORT_SYMBOL(mlx5_unmap_free_uar); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 3388a43b78f6..bb1a880a5bc5 100644 --- a/include/linux/mlx5/driver.h +++ 
b/include/linux/mlx5/driver.h @@ -460,8 +460,6 @@ struct mlx5_priv { struct mlx5_uuar_info uuari; MLX5_DECLARE_DOORBELL_LOCK(cq_uar_lock); - struct io_mapping *bf_mapping; - /* pages stuff */ struct workqueue_struct *pg_wq; struct rb_root page_root; @@ -719,7 +717,8 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn); int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn); int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari); -int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); +int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar, + bool map_wc); void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar); void mlx5_health_cleanup(struct mlx5_core_dev *dev); int mlx5_health_init(struct mlx5_core_dev *dev); -- cgit v1.2.3 From 4ac801b77e6f06e6b12c069fd29216a4102065fb Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 28 Feb 2016 12:26:52 +0200 Subject: qed: Semantic refactoring of interrupt code Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 6 +- drivers/net/ethernet/qlogic/qed/qed_int.c | 155 ++++++++++++++++------------- drivers/net/ethernet/qlogic/qed/qed_int.h | 6 +- drivers/net/ethernet/qlogic/qed/qed_main.c | 15 +-- include/linux/qed/qed_if.h | 6 ++ 5 files changed, 111 insertions(+), 77 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index acfe7be49a58..d9a5175ebd04 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1011,13 +1011,17 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn) { u32 *resc_start = p_hwfn->hw_info.resc_start; u32 *resc_num = p_hwfn->hw_info.resc_num; + struct qed_sb_cnt_info sb_cnt_info; int num_funcs, i; num_funcs = MAX_NUM_PFS_BB; + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(p_hwfn, &sb_cnt_info); + resc_num[QED_SB] = min_t(u32, (MAX_SB_PER_PATH_BB / num_funcs), - qed_int_get_num_sbs(p_hwfn, NULL)); + sb_cnt_info.sb_cnt); resc_num[QED_L2_QUEUE] = MAX_NUM_L2_QUEUES_BB / num_funcs; resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs; resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index fa73daa94655..7fd1be61de5c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -343,17 +343,17 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie) static void qed_int_sb_attn_free(struct qed_hwfn *p_hwfn) { - struct qed_dev *cdev = p_hwfn->cdev; - struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; - - if (p_sb) { - if (p_sb->sb_attn) - dma_free_coherent(&cdev->pdev->dev, - SB_ATTN_ALIGNED_SIZE(p_hwfn), - p_sb->sb_attn, - p_sb->sb_phys); - kfree(p_sb); - } + struct qed_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (!p_sb) + return; + + if (p_sb->sb_attn) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ATTN_ALIGNED_SIZE(p_hwfn), + p_sb->sb_attn, + p_sb->sb_phys); + kfree(p_sb); } static void qed_int_sb_attn_setup(struct qed_hwfn *p_hwfn, @@ -433,6 +433,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, u16 vf_number, u8 vf_valid) { + struct qed_dev *cdev = p_hwfn->cdev; u32 cau_state; memset(p_sb_entry, 0, sizeof(*p_sb_entry)); @@ -451,14 +452,12 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, cau_state 
= CAU_HC_DISABLE_STATE; - if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { + if (cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) { cau_state = CAU_HC_ENABLE_STATE; - if (!p_hwfn->cdev->rx_coalesce_usecs) - p_hwfn->cdev->rx_coalesce_usecs = - QED_CAU_DEF_RX_USECS; - if (!p_hwfn->cdev->tx_coalesce_usecs) - p_hwfn->cdev->tx_coalesce_usecs = - QED_CAU_DEF_TX_USECS; + if (!cdev->rx_coalesce_usecs) + cdev->rx_coalesce_usecs = QED_CAU_DEF_RX_USECS; + if (!cdev->tx_coalesce_usecs) + cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS; } SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); @@ -638,8 +637,10 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn, sb_info->sb_ack = 0; memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); - p_hwfn->sbs_info[sb_id] = NULL; - p_hwfn->num_sbs--; + if (p_hwfn->sbs_info[sb_id] != NULL) { + p_hwfn->sbs_info[sb_id] = NULL; + p_hwfn->num_sbs--; + } return 0; } @@ -648,14 +649,15 @@ static void qed_int_sp_sb_free(struct qed_hwfn *p_hwfn) { struct qed_sb_sp_info *p_sb = p_hwfn->p_sp_sb; - if (p_sb) { - if (p_sb->sb_info.sb_virt) - dma_free_coherent(&p_hwfn->cdev->pdev->dev, - SB_ALIGNED_SIZE(p_hwfn), - p_sb->sb_info.sb_virt, - p_sb->sb_info.sb_phys); - kfree(p_sb); - } + if (!p_sb) + return; + + if (p_sb->sb_info.sb_virt) + dma_free_coherent(&p_hwfn->cdev->pdev->dev, + SB_ALIGNED_SIZE(p_hwfn), + p_sb->sb_info.sb_virt, + p_sb->sb_info.sb_phys); + kfree(p_sb); } static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, @@ -718,36 +720,36 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; - int qed_status = -ENOMEM; + int rc = -ENOMEM; u8 pi; /* Look for a free index */ for (pi = 0; pi < ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { - if (!p_sp_sb->pi_info_arr[pi].comp_cb) { - p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; - p_sp_sb->pi_info_arr[pi].cookie = cookie; - *sb_idx = pi; - *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; - qed_status = 0; - break; - } + if (p_sp_sb->pi_info_arr[pi].comp_cb) + continue; + + p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; + p_sp_sb->pi_info_arr[pi].cookie = cookie; + *sb_idx = pi; + *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; + rc = 0; + break; } - return qed_status; + return rc; } int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi) { struct qed_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; - int qed_status = -ENOMEM; - if (p_sp_sb->pi_info_arr[pi].comp_cb) { - p_sp_sb->pi_info_arr[pi].comp_cb = NULL; - p_sp_sb->pi_info_arr[pi].cookie = NULL; - qed_status = 0; - } + if (p_sp_sb->pi_info_arr[pi].comp_cb == NULL) + return -ENOMEM; - return qed_status; + p_sp_sb->pi_info_arr[pi].comp_cb = NULL; + p_sp_sb->pi_info_arr[pi].cookie = NULL; + + return 0; } u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn) @@ -937,6 +939,39 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, } } +static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u16 sb_id) +{ + u32 val = qed_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + + sizeof(u32) * sb_id); + struct qed_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + + /* stop scanning when hit first invalid PF entry */ + if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && + GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) + goto out; + + /* Fill the block information */ + p_block->status = QED_IGU_STATUS_VALID; + p_block->function_id = GET_FIELD(val, + IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, 
IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, + IGU_MAPPING_LINE_VECTOR_NUMBER); + + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d is_pf = %d vector_num = 0x%x\n", + sb_id, val, p_block->function_id, + p_block->is_pf, p_block->vector_number); + +out: + return val; +} + int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -963,26 +998,13 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, sb_id++) { blk = &p_igu_info->igu_map.igu_blocks[sb_id]; - val = qed_rd(p_hwfn, p_ptt, - IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); + val = qed_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); /* stop scanning when hit first invalid PF entry */ if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) break; - blk->status = QED_IGU_STATUS_VALID; - blk->function_id = GET_FIELD(val, - IGU_MAPPING_LINE_FUNCTION_NUMBER); - blk->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); - blk->vector_number = GET_FIELD(val, - IGU_MAPPING_LINE_VECTOR_NUMBER); - - DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, - "IGU_BLOCK[sb_id]:%x:func_id = %d is_pf = %d vector_num = 0x%x\n", - val, blk->function_id, blk->is_pf, - blk->vector_number); - if (blk->is_pf) { if (blk->function_id == p_hwfn->rel_pf_id) { blk->status |= QED_IGU_STATUS_PF; @@ -1121,18 +1143,17 @@ void qed_int_setup(struct qed_hwfn *p_hwfn, qed_int_sp_dpc_setup(p_hwfn); } -int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, - int *p_iov_blks) +void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + struct qed_sb_cnt_info *p_sb_cnt_info) { struct qed_igu_info *info = p_hwfn->hw_info.p_igu_info; - if (!info) - return 0; - - if (p_iov_blks) - *p_iov_blks = info->free_blks; + if (!info || !p_sb_cnt_info) + return; - return info->igu_sb_cnt; + p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; + p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; + p_sb_cnt_info->sb_free_blk = info->free_blks; } void qed_int_disable_post_isr_release(struct qed_dev *cdev) diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 51e0b09a7f47..c57f2e680770 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -161,12 +161,12 @@ void qed_int_sp_dpc(unsigned long hwfn_cookie); * blocks configured for this funciton in the igu. 
* * @param p_hwfn - * @param p_iov_blks - configured free blks for vfs + * @param p_sb_cnt_info * * @return int - number of status blocks configured */ -int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, - int *p_iov_blks); +void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, + struct qed_sb_cnt_info *p_sb_cnt_info); /** * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 25d6e91335ea..caa689e6575c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -634,15 +634,18 @@ static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) static int qed_slowpath_setup_int(struct qed_dev *cdev, enum qed_int_mode int_mode) { - int rc, i; - u8 num_vectors = 0; - + struct qed_sb_cnt_info sb_cnt_info; + int rc; + int i; memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); cdev->int_params.in.int_mode = int_mode; - for_each_hwfn(cdev, i) - num_vectors += qed_int_get_num_sbs(&cdev->hwfns[i], NULL) + 1; - cdev->int_params.in.num_vectors = num_vectors; + for_each_hwfn(cdev, i) { + memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); + qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); + cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt; + cdev->int_params.in.num_vectors++; /* slowpath */ + } /* We want a minimum of one slowpath and one fastpath vector per hwfn */ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 3d43c1d4ecef..1f7599c77cd4 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -446,6 +446,12 @@ struct qed_eth_stats { #define RX_PI 0 #define TX_PI(tc) (RX_PI + 1 + tc) +struct qed_sb_cnt_info { + int sb_cnt; + int sb_iov_cnt; + int sb_free_blk; +}; + static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) { u32 prod = 0; -- cgit v1.2.3 From 0d956e8a65d53e0d1a71d28975c821cf0f6ba676 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 28 Feb 2016 12:26:53 +0200 Subject: qed: Add support for HW attentions HW is capable of generating attentnions for a multitude of reasons, but current driver is enabling attention generation only for management firmware [required for link notifications]. This patch enables almost all of the possible reasons for HW attentions, logging the HW block generating the attention and preventing further attentions from that source [to prevent possible attention flood]. It also lays the infrastructure for additional exploration of the various attentions. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_int.c | 395 ++++++++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 2 + 2 files changed, 357 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 7fd1be61de5c..c914ac5940eb 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -42,21 +42,210 @@ struct qed_sb_sp_info { #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) -#define ATTN_STATE_BITS (0xfff) +struct aeu_invert_reg_bit { + char bit_name[30]; + +#define ATTENTION_PARITY (1 << 0) + +#define ATTENTION_LENGTH_MASK (0x00000ff0) +#define ATTENTION_LENGTH_SHIFT (4) +#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ + ATTENTION_LENGTH_SHIFT) +#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) +#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) +#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ + ATTENTION_PARITY) + +/* Multiple bits start with this offset */ +#define ATTENTION_OFFSET_MASK (0x000ff000) +#define ATTENTION_OFFSET_SHIFT (12) + unsigned int flags; +}; + +struct aeu_invert_reg { + struct aeu_invert_reg_bit bits[32]; +}; + +#define MAX_ATTN_GRPS (8) +#define NUM_ATTN_REGS (9) + +/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ +static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { + { + { /* After Invert 1 */ + {"GPIO0 function%d", + (32 << ATTENTION_LENGTH_SHIFT)}, + } + }, + + { + { /* After Invert 2 */ + {"PGLUE config_space", ATTENTION_SINGLE}, + {"PGLUE misc_flr", ATTENTION_SINGLE}, + {"PGLUE B RBC", ATTENTION_PAR_INT}, + {"PGLUE misc_mctp", ATTENTION_SINGLE}, + {"Flash event", ATTENTION_SINGLE}, + {"SMB event", ATTENTION_SINGLE}, + {"Main Power", ATTENTION_SINGLE}, + {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | + (1 << ATTENTION_OFFSET_SHIFT)}, + {"PCIE glue/PXP VPD %d", + (16 << ATTENTION_LENGTH_SHIFT)}, + } + }, + + { + { /* After Invert 3 */ + {"General Attention %d", + (32 << ATTENTION_LENGTH_SHIFT)}, + } + }, + + { + { /* After Invert 4 */ + {"General Attention 32", ATTENTION_SINGLE}, + {"General Attention %d", + (2 << ATTENTION_LENGTH_SHIFT) | + (33 << ATTENTION_OFFSET_SHIFT)}, + {"General Attention 35", ATTENTION_SINGLE}, + {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT)}, + {"MCP CPU", ATTENTION_SINGLE}, + {"MCP Watchdog timer", ATTENTION_SINGLE}, + {"MCP M2P", ATTENTION_SINGLE}, + {"AVS stop status ready", ATTENTION_SINGLE}, + {"MSTAT", ATTENTION_PAR_INT}, + {"MSTAT per-path", ATTENTION_PAR_INT}, + {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT)}, + {"NIG", ATTENTION_PAR_INT}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT}, + {"BTB", ATTENTION_PAR_INT}, + {"BRB", ATTENTION_PAR_INT}, + {"PRS", ATTENTION_PAR_INT}, + } + }, + + { + { /* After Invert 5 */ + {"SRC", ATTENTION_PAR_INT}, + {"PB Client1", ATTENTION_PAR_INT}, + {"PB Client2", ATTENTION_PAR_INT}, + {"RPB", ATTENTION_PAR_INT}, + {"PBF", ATTENTION_PAR_INT}, + {"QM", ATTENTION_PAR_INT}, + {"TM", ATTENTION_PAR_INT}, + {"MCM", ATTENTION_PAR_INT}, + {"MSDM", ATTENTION_PAR_INT}, + {"MSEM", ATTENTION_PAR_INT}, + {"PCM", ATTENTION_PAR_INT}, + {"PSDM", ATTENTION_PAR_INT}, + {"PSEM", ATTENTION_PAR_INT}, + {"TCM", ATTENTION_PAR_INT}, + {"TSDM", ATTENTION_PAR_INT}, + {"TSEM", ATTENTION_PAR_INT}, + } + }, + + { + { /* After Invert 6 */ + {"UCM", ATTENTION_PAR_INT}, + {"USDM", ATTENTION_PAR_INT}, + {"USEM", ATTENTION_PAR_INT}, + {"XCM", 
ATTENTION_PAR_INT}, + {"XSDM", ATTENTION_PAR_INT}, + {"XSEM", ATTENTION_PAR_INT}, + {"YCM", ATTENTION_PAR_INT}, + {"YSDM", ATTENTION_PAR_INT}, + {"YSEM", ATTENTION_PAR_INT}, + {"XYLD", ATTENTION_PAR_INT}, + {"TMLD", ATTENTION_PAR_INT}, + {"MYLD", ATTENTION_PAR_INT}, + {"YULD", ATTENTION_PAR_INT}, + {"DORQ", ATTENTION_PAR_INT}, + {"DBG", ATTENTION_PAR_INT}, + {"IPC", ATTENTION_PAR_INT}, + } + }, + + { + { /* After Invert 7 */ + {"CCFC", ATTENTION_PAR_INT}, + {"CDU", ATTENTION_PAR_INT}, + {"DMAE", ATTENTION_PAR_INT}, + {"IGU", ATTENTION_PAR_INT}, + {"ATC", ATTENTION_PAR_INT}, + {"CAU", ATTENTION_PAR_INT}, + {"PTU", ATTENTION_PAR_INT}, + {"PRM", ATTENTION_PAR_INT}, + {"TCFC", ATTENTION_PAR_INT}, + {"RDIF", ATTENTION_PAR_INT}, + {"TDIF", ATTENTION_PAR_INT}, + {"RSS", ATTENTION_PAR_INT}, + {"MISC", ATTENTION_PAR_INT}, + {"MISCS", ATTENTION_PAR_INT}, + {"PCIE", ATTENTION_PAR}, + {"Vaux PCI core", ATTENTION_SINGLE}, + {"PSWRQ", ATTENTION_PAR_INT}, + } + }, + + { + { /* After Invert 8 */ + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT}, + {"PSWWR", ATTENTION_PAR_INT}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT}, + {"PSWRD", ATTENTION_PAR_INT}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT}, + {"PSWHST", ATTENTION_PAR_INT}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT}, + {"GRC", ATTENTION_PAR_INT}, + {"CPMU", ATTENTION_PAR_INT}, + {"NCSI", ATTENTION_PAR_INT}, + {"MSEM PRAM", ATTENTION_PAR}, + {"PSEM PRAM", ATTENTION_PAR}, + {"TSEM PRAM", ATTENTION_PAR}, + {"USEM PRAM", ATTENTION_PAR}, + {"XSEM PRAM", ATTENTION_PAR}, + {"YSEM PRAM", ATTENTION_PAR}, + {"pxp_misc_mps", ATTENTION_PAR}, + {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE}, + {"PERST_B assertion", ATTENTION_SINGLE}, + {"PERST_B deassertion", ATTENTION_SINGLE}, + {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT)}, + } + }, + + { + { /* After Invert 9 */ + {"MCP Latched memory", ATTENTION_PAR}, + {"MCP Latched scratchpad cache", ATTENTION_SINGLE}, + {"MCP Latched ump_tx", ATTENTION_PAR}, + {"MCP Latched scratchpad", ATTENTION_PAR}, + {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT)}, + } + }, +}; + +#define ATTN_STATE_BITS (0xfff) #define ATTN_BITS_MASKABLE (0x3ff) struct qed_sb_attn_info { /* Virtual & Physical address of the SB */ struct atten_status_block *sb_attn; - dma_addr_t sb_phys; + dma_addr_t sb_phys; /* Last seen running index */ - u16 index; + u16 index; + + /* A mask of the AEU bits resulting in a parity error */ + u32 parity_mask[NUM_ATTN_REGS]; + + /* A pointer to the attention description structure */ + struct aeu_invert_reg *p_aeu_desc; /* Previously asserted attentions, which are still unasserted */ - u16 known_attn; + u16 known_attn; /* Cleanup address for the link's general hw attention */ - u32 mfw_attn_addr; + u32 mfw_attn_addr; }; static inline u16 qed_attn_update_idx(struct qed_hwfn *p_hwfn, @@ -127,6 +316,39 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn, return 0; } +/** + * @brief qed_int_deassertion_aeu_bit - handles the effects of a single + * cause of the attention + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the attention + * @param aeu_en_reg - register offset of the AEU enable reg. which configured + * this bit to this group. 
+ * @param bit_index - index of this bit in the aeu_en_reg + * + * @return int + */ +static int +qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, + u32 bitmask) +{ + int rc = -EINVAL; + u32 val, mask = ~bitmask; + + DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", + p_aeu->bit_name, bitmask); + + /* Prevent this Attention from being asserted in the future */ + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); + DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", + p_aeu->bit_name); + + return rc; +} + /** * @brief - handles deassertion of previously asserted attentions. * @@ -139,17 +361,110 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, u16 deasserted_bits) { struct qed_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; - u32 aeu_mask; + u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; + u8 i, j, k, bit_idx; + int rc = 0; + + /* Read the attention registers in the AEU */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + aeu_inv_arr[i] = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_AFTER_INVERT_1_IGU + + i * 0x4); + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Deasserted bits [%d]: %08x\n", + i, aeu_inv_arr[i]); + } + + /* Find parity attentions first */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; + u32 en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32)); + u32 parities; + + /* Skip register in which no parity bit is currently set */ + parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; + if (!parities) + continue; - if (deasserted_bits != 0x100) - DP_ERR(p_hwfn, "Unexpected - non-link deassertion\n"); + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; + + if ((p_bit->flags & ATTENTION_PARITY) && + !!(parities & (1 << bit_idx))) { + DP_INFO(p_hwfn, + "%s[%d] parity attention is set\n", + p_bit->bit_name, bit_idx); + } + + bit_idx += ATTENTION_LENGTH(p_bit->flags); + } + } + + /* Find non-parity cause for attention and act */ + for (k = 0; k < MAX_ATTN_GRPS; k++) { + struct aeu_invert_reg_bit *p_aeu; + + /* Handle only groups whose attention is currently deasserted */ + if (!(deasserted_bits & (1 << k))) + continue; + + for (i = 0; i < NUM_ATTN_REGS; i++) { + u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32) + + k * sizeof(u32) * NUM_ATTN_REGS; + u32 en, bits; + + en = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + bits = aeu_inv_arr[i] & en; + + /* Skip if no bit from this group is currently set */ + if (!bits) + continue; + + /* Find all set bits from current register which belong + * to current group, making them responsible for the + * previous assertion. 
+ */ + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + u8 bit, bit_len; + u32 bitmask; + + p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; + + /* No need to handle parity-only bits */ + if (p_aeu->flags == ATTENTION_PAR) + continue; + + bit = bit_idx; + bit_len = ATTENTION_LENGTH(p_aeu->flags); + if (p_aeu->flags & ATTENTION_PAR_INT) { + /* Skip Parity */ + bit++; + bit_len--; + } + + bitmask = bits & (((1 << bit_len) - 1) << bit); + if (bitmask) { + /* Handle source of the attention */ + qed_int_deassertion_aeu_bit(p_hwfn, + p_aeu, + aeu_en, + bitmask); + } + + bit_idx += ATTENTION_LENGTH(p_aeu->flags); + } + } + } /* Clear IGU indication for the deasserted bits */ DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview + - GTT_BAR0_MAP_REG_IGU_CMD + - ((IGU_CMD_ATTN_BIT_CLR_UPPER - - IGU_CMD_INT_ACK_BASE) << 3), - ~((u32)deasserted_bits)); + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), + ~((u32)deasserted_bits)); /* Unmask deasserted attentions in IGU */ aeu_mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, @@ -160,7 +475,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, /* Clear deassertion from inner state */ sb_attn_sw->known_attn &= ~deasserted_bits; - return 0; + return rc; } static int qed_int_attentions(struct qed_hwfn *p_hwfn) @@ -379,10 +694,31 @@ static void qed_int_sb_attn_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr) { struct qed_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + int i, j, k; sb_info->sb_attn = sb_virt_addr; sb_info->sb_phys = sb_phy_addr; + /* Set the pointer to the AEU descriptors */ + sb_info->p_aeu_desc = aeu_descs; + + /* Calculate Parity Masks */ + memset(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); + for (i = 0; i < NUM_ATTN_REGS; i++) { + /* j is array index, k is bit index */ + for (j = 0, k = 0; k < 32; j++) { + unsigned int flags = aeu_descs[i].bits[j].flags; + + if (flags & ATTENTION_PARITY) + sb_info->parity_mask[i] |= 1 << k; + + k += ATTENTION_LENGTH(flags); + } + DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, + "Attn Mask [Reg %d]: 0x%08x\n", + i, sb_info->parity_mask[i]); + } + /* Set the address of cleanup for the mcp attention */ sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + MISC_REG_AEU_GENERAL_ATTN_0; @@ -694,25 +1030,6 @@ static int qed_int_sp_sb_alloc(struct qed_hwfn *p_hwfn, return 0; } -static void qed_int_sp_sb_setup(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) -{ - if (!p_hwfn) - return; - - if (p_hwfn->p_sp_sb) - qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); - else - DP_NOTICE(p_hwfn->cdev, - "Failed to setup Slow path status block - NULL pointer\n"); - - if (p_hwfn->p_sb_attn) - qed_int_sb_attn_setup(p_hwfn, p_ptt); - else - DP_NOTICE(p_hwfn->cdev, - "Failed to setup attentions status block - NULL pointer\n"); -} - int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, void *cookie, @@ -788,16 +1105,13 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode) { - int rc, i; - - /* Mask non-link attentions */ - for (i = 0; i < 9; i++) - qed_wr(p_hwfn, p_ptt, - MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0); + int rc; - /* Configure AEU signal change to produce attentions for link */ + /* Configure AEU signal change to produce attentions */ + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + qed_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 
0xfff); /* Flush the writes to IGU */ mmiowb(); @@ -1139,7 +1453,8 @@ void qed_int_free(struct qed_hwfn *p_hwfn) void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { - qed_int_sp_sb_setup(p_hwfn, p_ptt); + qed_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); + qed_int_sb_attn_setup(p_hwfn, p_ptt); qed_int_sp_dpc_setup(p_hwfn); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index e8df12335a97..2286e75642e9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -333,6 +333,8 @@ 0x180800UL #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ 0x00849cUL +#define MISC_REG_AEU_AFTER_INVERT_1_IGU \ + 0x0087b4UL #define MISC_REG_AEU_MASK_ATTN_IGU \ 0x008494UL #define IGU_REG_CLEANUP_STATUS_0 \ -- cgit v1.2.3 From ff38577aa9534fed1434d2ff8a0d1666a7f11fe4 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 28 Feb 2016 12:26:54 +0200 Subject: qed: Print HW attention reasons Each HW block contains common information about attention reasons, raising a bit for each one of the different sub-reasons that caused it to raise an attention. This patch extends the infrastructure by allowing logging of the various reasons causing the HW blocks to generate an attention. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_int.c | 1543 +++++++++++++++++++++++++++-- 1 file changed, 1436 insertions(+), 107 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index c914ac5940eb..c8bca7776b43 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -39,6 +39,11 @@ struct qed_sb_sp_info { struct qed_pi_info pi_info_arr[PIS_PER_SB]; }; +enum qed_attention_type { + QED_ATTN_TYPE_ATTN, + QED_ATTN_TYPE_PARITY, +}; + #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) @@ -60,6 +65,8 @@ struct aeu_invert_reg_bit { #define ATTENTION_OFFSET_MASK (0x000ff000) #define ATTENTION_OFFSET_SHIFT (12) unsigned int flags; + + enum block_id block_index; }; struct aeu_invert_reg { @@ -69,158 +76,1379 @@ struct aeu_invert_reg { #define MAX_ATTN_GRPS (8) #define NUM_ATTN_REGS (9) +/* HW Attention register */ +struct attn_hw_reg { + u16 reg_idx; /* Index of this register in its block */ + u16 num_of_bits; /* number of valid attention bits */ + u32 sts_addr; /* Address of the STS register */ + u32 sts_clr_addr; /* Address of the STS_CLR register */ + u32 sts_wr_addr; /* Address of the STS_WR register */ + u32 mask_addr; /* Address of the MASK register */ +}; + +/* HW block attention registers */ +struct attn_hw_regs { + u16 num_of_int_regs; /* Number of interrupt regs */ + u16 num_of_prty_regs; /* Number of parity regs */ + struct attn_hw_reg **int_regs; /* interrupt regs */ + struct attn_hw_reg **prty_regs; /* parity regs */ +}; + +/* HW block attention registers */ +struct attn_hw_block { + const char *name; /* Block name */ + struct attn_hw_regs chip_regs[1]; +}; + +static struct attn_hw_reg grc_int0_bb_b0 = { + 0, 4, 0x50180, 0x5018c, 0x50188, 0x50184}; + +static struct attn_hw_reg *grc_int_bb_b0_regs[1] = { + &grc_int0_bb_b0}; + +static struct attn_hw_reg grc_prty1_bb_b0 = { + 0, 2, 0x50200, 0x5020c, 0x50208, 0x50204}; + +static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = { + &grc_prty1_bb_b0}; + +static struct attn_hw_reg miscs_int0_bb_b0 = { + 0, 3, 0x9180, 0x918c, 0x9188, 0x9184}; + 
+static struct attn_hw_reg miscs_int1_bb_b0 = { + 1, 11, 0x9190, 0x919c, 0x9198, 0x9194}; + +static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = { + &miscs_int0_bb_b0, &miscs_int1_bb_b0}; + +static struct attn_hw_reg miscs_prty0_bb_b0 = { + 0, 1, 0x91a0, 0x91ac, 0x91a8, 0x91a4}; + +static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = { + &miscs_prty0_bb_b0}; + +static struct attn_hw_reg misc_int0_bb_b0 = { + 0, 1, 0x8180, 0x818c, 0x8188, 0x8184}; + +static struct attn_hw_reg *misc_int_bb_b0_regs[1] = { + &misc_int0_bb_b0}; + +static struct attn_hw_reg pglue_b_int0_bb_b0 = { + 0, 23, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184}; + +static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = { + &pglue_b_int0_bb_b0}; + +static struct attn_hw_reg pglue_b_prty0_bb_b0 = { + 0, 1, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194}; + +static struct attn_hw_reg pglue_b_prty1_bb_b0 = { + 1, 22, 0x2a8200, 0x2a820c, 0x2a8208, 0x2a8204}; + +static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = { + &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0}; + +static struct attn_hw_reg cnig_int0_bb_b0 = { + 0, 6, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec}; + +static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = { + &cnig_int0_bb_b0}; + +static struct attn_hw_reg cnig_prty0_bb_b0 = { + 0, 2, 0x218348, 0x218354, 0x218350, 0x21834c}; + +static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = { + &cnig_prty0_bb_b0}; + +static struct attn_hw_reg cpmu_int0_bb_b0 = { + 0, 1, 0x303e0, 0x303ec, 0x303e8, 0x303e4}; + +static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = { + &cpmu_int0_bb_b0}; + +static struct attn_hw_reg ncsi_int0_bb_b0 = { + 0, 1, 0x404cc, 0x404d8, 0x404d4, 0x404d0}; + +static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = { + &ncsi_int0_bb_b0}; + +static struct attn_hw_reg ncsi_prty1_bb_b0 = { + 0, 1, 0x40000, 0x4000c, 0x40008, 0x40004}; + +static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = { + &ncsi_prty1_bb_b0}; + +static struct attn_hw_reg opte_prty1_bb_b0 = { + 0, 11, 0x53000, 0x5300c, 0x53008, 0x53004}; + +static struct attn_hw_reg opte_prty0_bb_b0 = { + 1, 1, 0x53208, 0x53214, 0x53210, 0x5320c}; + +static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = { + &opte_prty1_bb_b0, &opte_prty0_bb_b0}; + +static struct attn_hw_reg bmb_int0_bb_b0 = { + 0, 16, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4}; + +static struct attn_hw_reg bmb_int1_bb_b0 = { + 1, 28, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc}; + +static struct attn_hw_reg bmb_int2_bb_b0 = { + 2, 26, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4}; + +static struct attn_hw_reg bmb_int3_bb_b0 = { + 3, 31, 0x540108, 0x540114, 0x540110, 0x54010c}; + +static struct attn_hw_reg bmb_int4_bb_b0 = { + 4, 27, 0x540120, 0x54012c, 0x540128, 0x540124}; + +static struct attn_hw_reg bmb_int5_bb_b0 = { + 5, 29, 0x540138, 0x540144, 0x540140, 0x54013c}; + +static struct attn_hw_reg bmb_int6_bb_b0 = { + 6, 30, 0x540150, 0x54015c, 0x540158, 0x540154}; + +static struct attn_hw_reg bmb_int7_bb_b0 = { + 7, 32, 0x540168, 0x540174, 0x540170, 0x54016c}; + +static struct attn_hw_reg bmb_int8_bb_b0 = { + 8, 32, 0x540184, 0x540190, 0x54018c, 0x540188}; + +static struct attn_hw_reg bmb_int9_bb_b0 = { + 9, 32, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0}; + +static struct attn_hw_reg bmb_int10_bb_b0 = { + 10, 3, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8}; + +static struct attn_hw_reg bmb_int11_bb_b0 = { + 11, 4, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0}; + +static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = { + &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0, + &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, 
&bmb_int7_bb_b0, + &bmb_int8_bb_b0, &bmb_int9_bb_b0, &bmb_int10_bb_b0, &bmb_int11_bb_b0}; + +static struct attn_hw_reg bmb_prty0_bb_b0 = { + 0, 5, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0}; + +static struct attn_hw_reg bmb_prty1_bb_b0 = { + 1, 31, 0x540400, 0x54040c, 0x540408, 0x540404}; + +static struct attn_hw_reg bmb_prty2_bb_b0 = { + 2, 15, 0x540410, 0x54041c, 0x540418, 0x540414}; + +static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = { + &bmb_prty0_bb_b0, &bmb_prty1_bb_b0, &bmb_prty2_bb_b0}; + +static struct attn_hw_reg pcie_prty1_bb_b0 = { + 0, 17, 0x54000, 0x5400c, 0x54008, 0x54004}; + +static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = { + &pcie_prty1_bb_b0}; + +static struct attn_hw_reg mcp2_prty0_bb_b0 = { + 0, 1, 0x52040, 0x5204c, 0x52048, 0x52044}; + +static struct attn_hw_reg mcp2_prty1_bb_b0 = { + 1, 12, 0x52204, 0x52210, 0x5220c, 0x52208}; + +static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = { + &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0}; + +static struct attn_hw_reg pswhst_int0_bb_b0 = { + 0, 18, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184}; + +static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] = { + &pswhst_int0_bb_b0}; + +static struct attn_hw_reg pswhst_prty0_bb_b0 = { + 0, 1, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194}; + +static struct attn_hw_reg pswhst_prty1_bb_b0 = { + 1, 17, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204}; + +static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = { + &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0}; + +static struct attn_hw_reg pswhst2_int0_bb_b0 = { + 0, 5, 0x29e180, 0x29e18c, 0x29e188, 0x29e184}; + +static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = { + &pswhst2_int0_bb_b0}; + +static struct attn_hw_reg pswhst2_prty0_bb_b0 = { + 0, 1, 0x29e190, 0x29e19c, 0x29e198, 0x29e194}; + +static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = { + &pswhst2_prty0_bb_b0}; + +static struct attn_hw_reg pswrd_int0_bb_b0 = { + 0, 3, 0x29c180, 0x29c18c, 0x29c188, 0x29c184}; + +static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = { + &pswrd_int0_bb_b0}; + +static struct attn_hw_reg pswrd_prty0_bb_b0 = { + 0, 1, 0x29c190, 0x29c19c, 0x29c198, 0x29c194}; + +static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = { + &pswrd_prty0_bb_b0}; + +static struct attn_hw_reg pswrd2_int0_bb_b0 = { + 0, 5, 0x29d180, 0x29d18c, 0x29d188, 0x29d184}; + +static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = { + &pswrd2_int0_bb_b0}; + +static struct attn_hw_reg pswrd2_prty0_bb_b0 = { + 0, 1, 0x29d190, 0x29d19c, 0x29d198, 0x29d194}; + +static struct attn_hw_reg pswrd2_prty1_bb_b0 = { + 1, 31, 0x29d200, 0x29d20c, 0x29d208, 0x29d204}; + +static struct attn_hw_reg pswrd2_prty2_bb_b0 = { + 2, 3, 0x29d210, 0x29d21c, 0x29d218, 0x29d214}; + +static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = { + &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0}; + +static struct attn_hw_reg pswwr_int0_bb_b0 = { + 0, 16, 0x29a180, 0x29a18c, 0x29a188, 0x29a184}; + +static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = { + &pswwr_int0_bb_b0}; + +static struct attn_hw_reg pswwr_prty0_bb_b0 = { + 0, 1, 0x29a190, 0x29a19c, 0x29a198, 0x29a194}; + +static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = { + &pswwr_prty0_bb_b0}; + +static struct attn_hw_reg pswwr2_int0_bb_b0 = { + 0, 19, 0x29b180, 0x29b18c, 0x29b188, 0x29b184}; + +static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = { + &pswwr2_int0_bb_b0}; + +static struct attn_hw_reg pswwr2_prty0_bb_b0 = { + 0, 1, 0x29b190, 0x29b19c, 0x29b198, 0x29b194}; + +static struct attn_hw_reg pswwr2_prty1_bb_b0 = { + 1, 31, 0x29b200, 0x29b20c, 0x29b208, 0x29b204}; + 
+static struct attn_hw_reg pswwr2_prty2_bb_b0 = { + 2, 31, 0x29b210, 0x29b21c, 0x29b218, 0x29b214}; + +static struct attn_hw_reg pswwr2_prty3_bb_b0 = { + 3, 31, 0x29b220, 0x29b22c, 0x29b228, 0x29b224}; + +static struct attn_hw_reg pswwr2_prty4_bb_b0 = { + 4, 20, 0x29b230, 0x29b23c, 0x29b238, 0x29b234}; + +static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] = { + &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0, + &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0}; + +static struct attn_hw_reg pswrq_int0_bb_b0 = { + 0, 21, 0x280180, 0x28018c, 0x280188, 0x280184}; + +static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = { + &pswrq_int0_bb_b0}; + +static struct attn_hw_reg pswrq_prty0_bb_b0 = { + 0, 1, 0x280190, 0x28019c, 0x280198, 0x280194}; + +static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = { + &pswrq_prty0_bb_b0}; + +static struct attn_hw_reg pswrq2_int0_bb_b0 = { + 0, 15, 0x240180, 0x24018c, 0x240188, 0x240184}; + +static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = { + &pswrq2_int0_bb_b0}; + +static struct attn_hw_reg pswrq2_prty1_bb_b0 = { + 0, 9, 0x240200, 0x24020c, 0x240208, 0x240204}; + +static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = { + &pswrq2_prty1_bb_b0}; + +static struct attn_hw_reg pglcs_int0_bb_b0 = { + 0, 1, 0x1d00, 0x1d0c, 0x1d08, 0x1d04}; + +static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = { + &pglcs_int0_bb_b0}; + +static struct attn_hw_reg dmae_int0_bb_b0 = { + 0, 2, 0xc180, 0xc18c, 0xc188, 0xc184}; + +static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = { + &dmae_int0_bb_b0}; + +static struct attn_hw_reg dmae_prty1_bb_b0 = { + 0, 3, 0xc200, 0xc20c, 0xc208, 0xc204}; + +static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = { + &dmae_prty1_bb_b0}; + +static struct attn_hw_reg ptu_int0_bb_b0 = { + 0, 8, 0x560180, 0x56018c, 0x560188, 0x560184}; + +static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = { + &ptu_int0_bb_b0}; + +static struct attn_hw_reg ptu_prty1_bb_b0 = { + 0, 18, 0x560200, 0x56020c, 0x560208, 0x560204}; + +static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = { + &ptu_prty1_bb_b0}; + +static struct attn_hw_reg tcm_int0_bb_b0 = { + 0, 8, 0x1180180, 0x118018c, 0x1180188, 0x1180184}; + +static struct attn_hw_reg tcm_int1_bb_b0 = { + 1, 32, 0x1180190, 0x118019c, 0x1180198, 0x1180194}; + +static struct attn_hw_reg tcm_int2_bb_b0 = { + 2, 1, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4}; + +static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = { + &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0}; + +static struct attn_hw_reg tcm_prty1_bb_b0 = { + 0, 31, 0x1180200, 0x118020c, 0x1180208, 0x1180204}; + +static struct attn_hw_reg tcm_prty2_bb_b0 = { + 1, 2, 0x1180210, 0x118021c, 0x1180218, 0x1180214}; + +static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = { + &tcm_prty1_bb_b0, &tcm_prty2_bb_b0}; + +static struct attn_hw_reg mcm_int0_bb_b0 = { + 0, 14, 0x1200180, 0x120018c, 0x1200188, 0x1200184}; + +static struct attn_hw_reg mcm_int1_bb_b0 = { + 1, 26, 0x1200190, 0x120019c, 0x1200198, 0x1200194}; + +static struct attn_hw_reg mcm_int2_bb_b0 = { + 2, 1, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4}; + +static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = { + &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0}; + +static struct attn_hw_reg mcm_prty1_bb_b0 = { + 0, 31, 0x1200200, 0x120020c, 0x1200208, 0x1200204}; + +static struct attn_hw_reg mcm_prty2_bb_b0 = { + 1, 4, 0x1200210, 0x120021c, 0x1200218, 0x1200214}; + +static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = { + &mcm_prty1_bb_b0, &mcm_prty2_bb_b0}; + +static struct attn_hw_reg ucm_int0_bb_b0 = { + 0, 17, 
0x1280180, 0x128018c, 0x1280188, 0x1280184}; + +static struct attn_hw_reg ucm_int1_bb_b0 = { + 1, 29, 0x1280190, 0x128019c, 0x1280198, 0x1280194}; + +static struct attn_hw_reg ucm_int2_bb_b0 = { + 2, 1, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4}; + +static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = { + &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0}; + +static struct attn_hw_reg ucm_prty1_bb_b0 = { + 0, 31, 0x1280200, 0x128020c, 0x1280208, 0x1280204}; + +static struct attn_hw_reg ucm_prty2_bb_b0 = { + 1, 7, 0x1280210, 0x128021c, 0x1280218, 0x1280214}; + +static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = { + &ucm_prty1_bb_b0, &ucm_prty2_bb_b0}; + +static struct attn_hw_reg xcm_int0_bb_b0 = { + 0, 16, 0x1000180, 0x100018c, 0x1000188, 0x1000184}; + +static struct attn_hw_reg xcm_int1_bb_b0 = { + 1, 25, 0x1000190, 0x100019c, 0x1000198, 0x1000194}; + +static struct attn_hw_reg xcm_int2_bb_b0 = { + 2, 8, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4}; + +static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = { + &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0}; + +static struct attn_hw_reg xcm_prty1_bb_b0 = { + 0, 31, 0x1000200, 0x100020c, 0x1000208, 0x1000204}; + +static struct attn_hw_reg xcm_prty2_bb_b0 = { + 1, 11, 0x1000210, 0x100021c, 0x1000218, 0x1000214}; + +static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = { + &xcm_prty1_bb_b0, &xcm_prty2_bb_b0}; + +static struct attn_hw_reg ycm_int0_bb_b0 = { + 0, 13, 0x1080180, 0x108018c, 0x1080188, 0x1080184}; + +static struct attn_hw_reg ycm_int1_bb_b0 = { + 1, 23, 0x1080190, 0x108019c, 0x1080198, 0x1080194}; + +static struct attn_hw_reg ycm_int2_bb_b0 = { + 2, 1, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4}; + +static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = { + &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0}; + +static struct attn_hw_reg ycm_prty1_bb_b0 = { + 0, 31, 0x1080200, 0x108020c, 0x1080208, 0x1080204}; + +static struct attn_hw_reg ycm_prty2_bb_b0 = { + 1, 3, 0x1080210, 0x108021c, 0x1080218, 0x1080214}; + +static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = { + &ycm_prty1_bb_b0, &ycm_prty2_bb_b0}; + +static struct attn_hw_reg pcm_int0_bb_b0 = { + 0, 5, 0x1100180, 0x110018c, 0x1100188, 0x1100184}; + +static struct attn_hw_reg pcm_int1_bb_b0 = { + 1, 14, 0x1100190, 0x110019c, 0x1100198, 0x1100194}; + +static struct attn_hw_reg pcm_int2_bb_b0 = { + 2, 1, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4}; + +static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = { + &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0}; + +static struct attn_hw_reg pcm_prty1_bb_b0 = { + 0, 11, 0x1100200, 0x110020c, 0x1100208, 0x1100204}; + +static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = { + &pcm_prty1_bb_b0}; + +static struct attn_hw_reg qm_int0_bb_b0 = { + 0, 22, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184}; + +static struct attn_hw_reg *qm_int_bb_b0_regs[1] = { + &qm_int0_bb_b0}; + +static struct attn_hw_reg qm_prty0_bb_b0 = { + 0, 11, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194}; + +static struct attn_hw_reg qm_prty1_bb_b0 = { + 1, 31, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204}; + +static struct attn_hw_reg qm_prty2_bb_b0 = { + 2, 31, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214}; + +static struct attn_hw_reg qm_prty3_bb_b0 = { + 3, 11, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224}; + +static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = { + &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0}; + +static struct attn_hw_reg tm_int0_bb_b0 = { + 0, 32, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184}; + +static struct attn_hw_reg tm_int1_bb_b0 = { + 1, 11, 0x2c0190, 0x2c019c, 
0x2c0198, 0x2c0194}; + +static struct attn_hw_reg *tm_int_bb_b0_regs[2] = { + &tm_int0_bb_b0, &tm_int1_bb_b0}; + +static struct attn_hw_reg tm_prty1_bb_b0 = { + 0, 17, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204}; + +static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = { + &tm_prty1_bb_b0}; + +static struct attn_hw_reg dorq_int0_bb_b0 = { + 0, 9, 0x100180, 0x10018c, 0x100188, 0x100184}; + +static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = { + &dorq_int0_bb_b0}; + +static struct attn_hw_reg dorq_prty0_bb_b0 = { + 0, 1, 0x100190, 0x10019c, 0x100198, 0x100194}; + +static struct attn_hw_reg dorq_prty1_bb_b0 = { + 1, 6, 0x100200, 0x10020c, 0x100208, 0x100204}; + +static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = { + &dorq_prty0_bb_b0, &dorq_prty1_bb_b0}; + +static struct attn_hw_reg brb_int0_bb_b0 = { + 0, 32, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4}; + +static struct attn_hw_reg brb_int1_bb_b0 = { + 1, 30, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc}; + +static struct attn_hw_reg brb_int2_bb_b0 = { + 2, 28, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4}; + +static struct attn_hw_reg brb_int3_bb_b0 = { + 3, 31, 0x340108, 0x340114, 0x340110, 0x34010c}; + +static struct attn_hw_reg brb_int4_bb_b0 = { + 4, 27, 0x340120, 0x34012c, 0x340128, 0x340124}; + +static struct attn_hw_reg brb_int5_bb_b0 = { + 5, 1, 0x340138, 0x340144, 0x340140, 0x34013c}; + +static struct attn_hw_reg brb_int6_bb_b0 = { + 6, 8, 0x340150, 0x34015c, 0x340158, 0x340154}; + +static struct attn_hw_reg brb_int7_bb_b0 = { + 7, 32, 0x340168, 0x340174, 0x340170, 0x34016c}; + +static struct attn_hw_reg brb_int8_bb_b0 = { + 8, 17, 0x340184, 0x340190, 0x34018c, 0x340188}; + +static struct attn_hw_reg brb_int9_bb_b0 = { + 9, 1, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0}; + +static struct attn_hw_reg brb_int10_bb_b0 = { + 10, 14, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8}; + +static struct attn_hw_reg brb_int11_bb_b0 = { + 11, 8, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0}; + +static struct attn_hw_reg *brb_int_bb_b0_regs[12] = { + &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0, + &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0, + &brb_int8_bb_b0, &brb_int9_bb_b0, &brb_int10_bb_b0, &brb_int11_bb_b0}; + +static struct attn_hw_reg brb_prty0_bb_b0 = { + 0, 5, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0}; + +static struct attn_hw_reg brb_prty1_bb_b0 = { + 1, 31, 0x340400, 0x34040c, 0x340408, 0x340404}; + +static struct attn_hw_reg brb_prty2_bb_b0 = { + 2, 14, 0x340410, 0x34041c, 0x340418, 0x340414}; + +static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = { + &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0}; + +static struct attn_hw_reg src_int0_bb_b0 = { + 0, 1, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4}; + +static struct attn_hw_reg *src_int_bb_b0_regs[1] = { + &src_int0_bb_b0}; + +static struct attn_hw_reg prs_int0_bb_b0 = { + 0, 2, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044}; + +static struct attn_hw_reg *prs_int_bb_b0_regs[1] = { + &prs_int0_bb_b0}; + +static struct attn_hw_reg prs_prty0_bb_b0 = { + 0, 2, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054}; + +static struct attn_hw_reg prs_prty1_bb_b0 = { + 1, 31, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208}; + +static struct attn_hw_reg prs_prty2_bb_b0 = { + 2, 5, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218}; + +static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = { + &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0}; + +static struct attn_hw_reg tsdm_int0_bb_b0 = { + 0, 26, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044}; + +static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = { + &tsdm_int0_bb_b0}; + 
+static struct attn_hw_reg tsdm_prty1_bb_b0 = { + 0, 10, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204}; + +static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = { + &tsdm_prty1_bb_b0}; + +static struct attn_hw_reg msdm_int0_bb_b0 = { + 0, 26, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044}; + +static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = { + &msdm_int0_bb_b0}; + +static struct attn_hw_reg msdm_prty1_bb_b0 = { + 0, 11, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204}; + +static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = { + &msdm_prty1_bb_b0}; + +static struct attn_hw_reg usdm_int0_bb_b0 = { + 0, 26, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044}; + +static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = { + &usdm_int0_bb_b0}; + +static struct attn_hw_reg usdm_prty1_bb_b0 = { + 0, 10, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204}; + +static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = { + &usdm_prty1_bb_b0}; + +static struct attn_hw_reg xsdm_int0_bb_b0 = { + 0, 26, 0xf80040, 0xf8004c, 0xf80048, 0xf80044}; + +static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = { + &xsdm_int0_bb_b0}; + +static struct attn_hw_reg xsdm_prty1_bb_b0 = { + 0, 10, 0xf80200, 0xf8020c, 0xf80208, 0xf80204}; + +static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = { + &xsdm_prty1_bb_b0}; + +static struct attn_hw_reg ysdm_int0_bb_b0 = { + 0, 26, 0xf90040, 0xf9004c, 0xf90048, 0xf90044}; + +static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = { + &ysdm_int0_bb_b0}; + +static struct attn_hw_reg ysdm_prty1_bb_b0 = { + 0, 9, 0xf90200, 0xf9020c, 0xf90208, 0xf90204}; + +static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = { + &ysdm_prty1_bb_b0}; + +static struct attn_hw_reg psdm_int0_bb_b0 = { + 0, 26, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044}; + +static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = { + &psdm_int0_bb_b0}; + +static struct attn_hw_reg psdm_prty1_bb_b0 = { + 0, 9, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204}; + +static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = { + &psdm_prty1_bb_b0}; + +static struct attn_hw_reg tsem_int0_bb_b0 = { + 0, 32, 0x1700040, 0x170004c, 0x1700048, 0x1700044}; + +static struct attn_hw_reg tsem_int1_bb_b0 = { + 1, 13, 0x1700050, 0x170005c, 0x1700058, 0x1700054}; + +static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1740040, 0x174004c, 0x1740048, 0x1740044}; + +static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = { + &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg tsem_prty0_bb_b0 = { + 0, 3, 0x17000c8, 0x17000d4, 0x17000d0, 0x17000cc}; + +static struct attn_hw_reg tsem_prty1_bb_b0 = { + 1, 6, 0x1700200, 0x170020c, 0x1700208, 0x1700204}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, 0x174a200, 0x174a20c, 0x174a208, 0x174a204}; + +static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = { + &tsem_prty0_bb_b0, &tsem_prty1_bb_b0, + &tsem_fast_memory_vfc_config_prty1_bb_b0}; + +static struct attn_hw_reg msem_int0_bb_b0 = { + 0, 32, 0x1800040, 0x180004c, 0x1800048, 0x1800044}; + +static struct attn_hw_reg msem_int1_bb_b0 = { + 1, 13, 0x1800050, 0x180005c, 0x1800058, 0x1800054}; + +static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1840040, 0x184004c, 0x1840048, 0x1840044}; + +static struct attn_hw_reg *msem_int_bb_b0_regs[3] = { + &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg msem_prty0_bb_b0 = { + 0, 3, 0x18000c8, 0x18000d4, 0x18000d0, 0x18000cc}; + +static struct attn_hw_reg msem_prty1_bb_b0 = { + 1, 6, 0x1800200, 0x180020c, 0x1800208, 0x1800204}; + +static struct 
attn_hw_reg *msem_prty_bb_b0_regs[2] = { + &msem_prty0_bb_b0, &msem_prty1_bb_b0}; + +static struct attn_hw_reg usem_int0_bb_b0 = { + 0, 32, 0x1900040, 0x190004c, 0x1900048, 0x1900044}; + +static struct attn_hw_reg usem_int1_bb_b0 = { + 1, 13, 0x1900050, 0x190005c, 0x1900058, 0x1900054}; + +static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1940040, 0x194004c, 0x1940048, 0x1940044}; + +static struct attn_hw_reg *usem_int_bb_b0_regs[3] = { + &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg usem_prty0_bb_b0 = { + 0, 3, 0x19000c8, 0x19000d4, 0x19000d0, 0x19000cc}; + +static struct attn_hw_reg usem_prty1_bb_b0 = { + 1, 6, 0x1900200, 0x190020c, 0x1900208, 0x1900204}; + +static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = { + &usem_prty0_bb_b0, &usem_prty1_bb_b0}; + +static struct attn_hw_reg xsem_int0_bb_b0 = { + 0, 32, 0x1400040, 0x140004c, 0x1400048, 0x1400044}; + +static struct attn_hw_reg xsem_int1_bb_b0 = { + 1, 13, 0x1400050, 0x140005c, 0x1400058, 0x1400054}; + +static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1440040, 0x144004c, 0x1440048, 0x1440044}; + +static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = { + &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg xsem_prty0_bb_b0 = { + 0, 3, 0x14000c8, 0x14000d4, 0x14000d0, 0x14000cc}; + +static struct attn_hw_reg xsem_prty1_bb_b0 = { + 1, 7, 0x1400200, 0x140020c, 0x1400208, 0x1400204}; + +static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = { + &xsem_prty0_bb_b0, &xsem_prty1_bb_b0}; + +static struct attn_hw_reg ysem_int0_bb_b0 = { + 0, 32, 0x1500040, 0x150004c, 0x1500048, 0x1500044}; + +static struct attn_hw_reg ysem_int1_bb_b0 = { + 1, 13, 0x1500050, 0x150005c, 0x1500058, 0x1500054}; + +static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1540040, 0x154004c, 0x1540048, 0x1540044}; + +static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = { + &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg ysem_prty0_bb_b0 = { + 0, 3, 0x15000c8, 0x15000d4, 0x15000d0, 0x15000cc}; + +static struct attn_hw_reg ysem_prty1_bb_b0 = { + 1, 7, 0x1500200, 0x150020c, 0x1500208, 0x1500204}; + +static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = { + &ysem_prty0_bb_b0, &ysem_prty1_bb_b0}; + +static struct attn_hw_reg psem_int0_bb_b0 = { + 0, 32, 0x1600040, 0x160004c, 0x1600048, 0x1600044}; + +static struct attn_hw_reg psem_int1_bb_b0 = { + 1, 13, 0x1600050, 0x160005c, 0x1600058, 0x1600054}; + +static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = { + 2, 1, 0x1640040, 0x164004c, 0x1640048, 0x1640044}; + +static struct attn_hw_reg *psem_int_bb_b0_regs[3] = { + &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0}; + +static struct attn_hw_reg psem_prty0_bb_b0 = { + 0, 3, 0x16000c8, 0x16000d4, 0x16000d0, 0x16000cc}; + +static struct attn_hw_reg psem_prty1_bb_b0 = { + 1, 6, 0x1600200, 0x160020c, 0x1600208, 0x1600204}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, 0x164a200, 0x164a20c, 0x164a208, 0x164a204}; + +static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = { + &psem_prty0_bb_b0, &psem_prty1_bb_b0, + &psem_fast_memory_vfc_config_prty1_bb_b0}; + +static struct attn_hw_reg rss_int0_bb_b0 = { + 0, 12, 0x238980, 0x23898c, 0x238988, 0x238984}; + +static struct attn_hw_reg *rss_int_bb_b0_regs[1] = { + &rss_int0_bb_b0}; + +static struct attn_hw_reg rss_prty1_bb_b0 = { + 0, 4, 0x238a00, 0x238a0c, 0x238a08, 0x238a04}; + +static struct 
attn_hw_reg *rss_prty_bb_b0_regs[1] = { + &rss_prty1_bb_b0}; + +static struct attn_hw_reg tmld_int0_bb_b0 = { + 0, 6, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184}; + +static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = { + &tmld_int0_bb_b0}; + +static struct attn_hw_reg tmld_prty1_bb_b0 = { + 0, 8, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204}; + +static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = { + &tmld_prty1_bb_b0}; + +static struct attn_hw_reg muld_int0_bb_b0 = { + 0, 6, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184}; + +static struct attn_hw_reg *muld_int_bb_b0_regs[1] = { + &muld_int0_bb_b0}; + +static struct attn_hw_reg muld_prty1_bb_b0 = { + 0, 10, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204}; + +static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = { + &muld_prty1_bb_b0}; + +static struct attn_hw_reg yuld_int0_bb_b0 = { + 0, 6, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184}; + +static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = { + &yuld_int0_bb_b0}; + +static struct attn_hw_reg yuld_prty1_bb_b0 = { + 0, 6, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204}; + +static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = { + &yuld_prty1_bb_b0}; + +static struct attn_hw_reg xyld_int0_bb_b0 = { + 0, 6, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184}; + +static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = { + &xyld_int0_bb_b0}; + +static struct attn_hw_reg xyld_prty1_bb_b0 = { + 0, 9, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204}; + +static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = { + &xyld_prty1_bb_b0}; + +static struct attn_hw_reg prm_int0_bb_b0 = { + 0, 11, 0x230040, 0x23004c, 0x230048, 0x230044}; + +static struct attn_hw_reg *prm_int_bb_b0_regs[1] = { + &prm_int0_bb_b0}; + +static struct attn_hw_reg prm_prty0_bb_b0 = { + 0, 1, 0x230050, 0x23005c, 0x230058, 0x230054}; + +static struct attn_hw_reg prm_prty1_bb_b0 = { + 1, 24, 0x230200, 0x23020c, 0x230208, 0x230204}; + +static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = { + &prm_prty0_bb_b0, &prm_prty1_bb_b0}; + +static struct attn_hw_reg pbf_pb1_int0_bb_b0 = { + 0, 9, 0xda0040, 0xda004c, 0xda0048, 0xda0044}; + +static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = { + &pbf_pb1_int0_bb_b0}; + +static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = { + 0, 1, 0xda0050, 0xda005c, 0xda0058, 0xda0054}; + +static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = { + &pbf_pb1_prty0_bb_b0}; + +static struct attn_hw_reg pbf_pb2_int0_bb_b0 = { + 0, 9, 0xda4040, 0xda404c, 0xda4048, 0xda4044}; + +static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = { + &pbf_pb2_int0_bb_b0}; + +static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = { + 0, 1, 0xda4050, 0xda405c, 0xda4058, 0xda4054}; + +static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = { + &pbf_pb2_prty0_bb_b0}; + +static struct attn_hw_reg rpb_int0_bb_b0 = { + 0, 9, 0x23c040, 0x23c04c, 0x23c048, 0x23c044}; + +static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = { + &rpb_int0_bb_b0}; + +static struct attn_hw_reg rpb_prty0_bb_b0 = { + 0, 1, 0x23c050, 0x23c05c, 0x23c058, 0x23c054}; + +static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = { + &rpb_prty0_bb_b0}; + +static struct attn_hw_reg btb_int0_bb_b0 = { + 0, 16, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4}; + +static struct attn_hw_reg btb_int1_bb_b0 = { + 1, 16, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc}; + +static struct attn_hw_reg btb_int2_bb_b0 = { + 2, 4, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4}; + +static struct attn_hw_reg btb_int3_bb_b0 = { + 3, 32, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c}; + +static struct attn_hw_reg btb_int4_bb_b0 = { + 4, 23, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124}; + +static struct 
attn_hw_reg btb_int5_bb_b0 = { + 5, 32, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c}; + +static struct attn_hw_reg btb_int6_bb_b0 = { + 6, 1, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154}; + +static struct attn_hw_reg btb_int8_bb_b0 = { + 7, 1, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188}; + +static struct attn_hw_reg btb_int9_bb_b0 = { + 8, 1, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0}; + +static struct attn_hw_reg btb_int10_bb_b0 = { + 9, 1, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8}; + +static struct attn_hw_reg btb_int11_bb_b0 = { + 10, 2, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0}; + +static struct attn_hw_reg *btb_int_bb_b0_regs[11] = { + &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0, + &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0, + &btb_int9_bb_b0, &btb_int10_bb_b0, &btb_int11_bb_b0}; + +static struct attn_hw_reg btb_prty0_bb_b0 = { + 0, 5, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0}; + +static struct attn_hw_reg btb_prty1_bb_b0 = { + 1, 23, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404}; + +static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = { + &btb_prty0_bb_b0, &btb_prty1_bb_b0}; + +static struct attn_hw_reg pbf_int0_bb_b0 = { + 0, 1, 0xd80180, 0xd8018c, 0xd80188, 0xd80184}; + +static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = { + &pbf_int0_bb_b0}; + +static struct attn_hw_reg pbf_prty0_bb_b0 = { + 0, 1, 0xd80190, 0xd8019c, 0xd80198, 0xd80194}; + +static struct attn_hw_reg pbf_prty1_bb_b0 = { + 1, 31, 0xd80200, 0xd8020c, 0xd80208, 0xd80204}; + +static struct attn_hw_reg pbf_prty2_bb_b0 = { + 2, 27, 0xd80210, 0xd8021c, 0xd80218, 0xd80214}; + +static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = { + &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0}; + +static struct attn_hw_reg rdif_int0_bb_b0 = { + 0, 8, 0x300180, 0x30018c, 0x300188, 0x300184}; + +static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = { + &rdif_int0_bb_b0}; + +static struct attn_hw_reg rdif_prty0_bb_b0 = { + 0, 1, 0x300190, 0x30019c, 0x300198, 0x300194}; + +static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = { + &rdif_prty0_bb_b0}; + +static struct attn_hw_reg tdif_int0_bb_b0 = { + 0, 8, 0x310180, 0x31018c, 0x310188, 0x310184}; + +static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = { + &tdif_int0_bb_b0}; + +static struct attn_hw_reg tdif_prty0_bb_b0 = { + 0, 1, 0x310190, 0x31019c, 0x310198, 0x310194}; + +static struct attn_hw_reg tdif_prty1_bb_b0 = { + 1, 11, 0x310200, 0x31020c, 0x310208, 0x310204}; + +static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = { + &tdif_prty0_bb_b0, &tdif_prty1_bb_b0}; + +static struct attn_hw_reg cdu_int0_bb_b0 = { + 0, 8, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc}; + +static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = { + &cdu_int0_bb_b0}; + +static struct attn_hw_reg cdu_prty1_bb_b0 = { + 0, 5, 0x580200, 0x58020c, 0x580208, 0x580204}; + +static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = { + &cdu_prty1_bb_b0}; + +static struct attn_hw_reg ccfc_int0_bb_b0 = { + 0, 2, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184}; + +static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = { + &ccfc_int0_bb_b0}; + +static struct attn_hw_reg ccfc_prty1_bb_b0 = { + 0, 2, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204}; + +static struct attn_hw_reg ccfc_prty0_bb_b0 = { + 1, 6, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8}; + +static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = { + &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0}; + +static struct attn_hw_reg tcfc_int0_bb_b0 = { + 0, 2, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184}; + +static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = { + &tcfc_int0_bb_b0}; + +static struct attn_hw_reg 
tcfc_prty1_bb_b0 = { + 0, 2, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204}; + +static struct attn_hw_reg tcfc_prty0_bb_b0 = { + 1, 6, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8}; + +static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = { + &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0}; + +static struct attn_hw_reg igu_int0_bb_b0 = { + 0, 11, 0x180180, 0x18018c, 0x180188, 0x180184}; + +static struct attn_hw_reg *igu_int_bb_b0_regs[1] = { + &igu_int0_bb_b0}; + +static struct attn_hw_reg igu_prty0_bb_b0 = { + 0, 1, 0x180190, 0x18019c, 0x180198, 0x180194}; + +static struct attn_hw_reg igu_prty1_bb_b0 = { + 1, 31, 0x180200, 0x18020c, 0x180208, 0x180204}; + +static struct attn_hw_reg igu_prty2_bb_b0 = { + 2, 1, 0x180210, 0x18021c, 0x180218, 0x180214}; + +static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = { + &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0}; + +static struct attn_hw_reg cau_int0_bb_b0 = { + 0, 11, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0}; + +static struct attn_hw_reg *cau_int_bb_b0_regs[1] = { + &cau_int0_bb_b0}; + +static struct attn_hw_reg cau_prty1_bb_b0 = { + 0, 13, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204}; + +static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = { + &cau_prty1_bb_b0}; + +static struct attn_hw_reg dbg_int0_bb_b0 = { + 0, 1, 0x10180, 0x1018c, 0x10188, 0x10184}; + +static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = { + &dbg_int0_bb_b0}; + +static struct attn_hw_reg dbg_prty1_bb_b0 = { + 0, 1, 0x10200, 0x1020c, 0x10208, 0x10204}; + +static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = { + &dbg_prty1_bb_b0}; + +static struct attn_hw_reg nig_int0_bb_b0 = { + 0, 12, 0x500040, 0x50004c, 0x500048, 0x500044}; + +static struct attn_hw_reg nig_int1_bb_b0 = { + 1, 32, 0x500050, 0x50005c, 0x500058, 0x500054}; + +static struct attn_hw_reg nig_int2_bb_b0 = { + 2, 20, 0x500060, 0x50006c, 0x500068, 0x500064}; + +static struct attn_hw_reg nig_int3_bb_b0 = { + 3, 18, 0x500070, 0x50007c, 0x500078, 0x500074}; + +static struct attn_hw_reg nig_int4_bb_b0 = { + 4, 20, 0x500080, 0x50008c, 0x500088, 0x500084}; + +static struct attn_hw_reg nig_int5_bb_b0 = { + 5, 18, 0x500090, 0x50009c, 0x500098, 0x500094}; + +static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { + &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0, + &nig_int4_bb_b0, &nig_int5_bb_b0}; + +static struct attn_hw_reg nig_prty0_bb_b0 = { + 0, 1, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4}; + +static struct attn_hw_reg nig_prty1_bb_b0 = { + 1, 31, 0x500200, 0x50020c, 0x500208, 0x500204}; + +static struct attn_hw_reg nig_prty2_bb_b0 = { + 2, 31, 0x500210, 0x50021c, 0x500218, 0x500214}; + +static struct attn_hw_reg nig_prty3_bb_b0 = { + 3, 31, 0x500220, 0x50022c, 0x500228, 0x500224}; + +static struct attn_hw_reg nig_prty4_bb_b0 = { + 4, 17, 0x500230, 0x50023c, 0x500238, 0x500234}; + +static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { + &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, + &nig_prty3_bb_b0, &nig_prty4_bb_b0}; + +static struct attn_hw_reg ipc_int0_bb_b0 = { + 0, 13, 0x2050c, 0x20518, 0x20514, 0x20510}; + +static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = { + &ipc_int0_bb_b0}; + +static struct attn_hw_reg ipc_prty0_bb_b0 = { + 0, 1, 0x2051c, 0x20528, 0x20524, 0x20520}; + +static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { + &ipc_prty0_bb_b0}; + +static struct attn_hw_block attn_blocks[] = { + {"grc", {{1, 1, grc_int_bb_b0_regs, grc_prty_bb_b0_regs} } }, + {"miscs", {{2, 1, miscs_int_bb_b0_regs, miscs_prty_bb_b0_regs} } }, + {"misc", {{1, 0, misc_int_bb_b0_regs, NULL} } }, + {"dbu", {{0, 0, NULL, NULL} } }, + 
{"pglue_b", {{1, 2, pglue_b_int_bb_b0_regs, + pglue_b_prty_bb_b0_regs} } }, + {"cnig", {{1, 1, cnig_int_bb_b0_regs, cnig_prty_bb_b0_regs} } }, + {"cpmu", {{1, 0, cpmu_int_bb_b0_regs, NULL} } }, + {"ncsi", {{1, 1, ncsi_int_bb_b0_regs, ncsi_prty_bb_b0_regs} } }, + {"opte", {{0, 2, NULL, opte_prty_bb_b0_regs} } }, + {"bmb", {{12, 3, bmb_int_bb_b0_regs, bmb_prty_bb_b0_regs} } }, + {"pcie", {{0, 1, NULL, pcie_prty_bb_b0_regs} } }, + {"mcp", {{0, 0, NULL, NULL} } }, + {"mcp2", {{0, 2, NULL, mcp2_prty_bb_b0_regs} } }, + {"pswhst", {{1, 2, pswhst_int_bb_b0_regs, pswhst_prty_bb_b0_regs} } }, + {"pswhst2", {{1, 1, pswhst2_int_bb_b0_regs, + pswhst2_prty_bb_b0_regs} } }, + {"pswrd", {{1, 1, pswrd_int_bb_b0_regs, pswrd_prty_bb_b0_regs} } }, + {"pswrd2", {{1, 3, pswrd2_int_bb_b0_regs, pswrd2_prty_bb_b0_regs} } }, + {"pswwr", {{1, 1, pswwr_int_bb_b0_regs, pswwr_prty_bb_b0_regs} } }, + {"pswwr2", {{1, 5, pswwr2_int_bb_b0_regs, pswwr2_prty_bb_b0_regs} } }, + {"pswrq", {{1, 1, pswrq_int_bb_b0_regs, pswrq_prty_bb_b0_regs} } }, + {"pswrq2", {{1, 1, pswrq2_int_bb_b0_regs, pswrq2_prty_bb_b0_regs} } }, + {"pglcs", {{1, 0, pglcs_int_bb_b0_regs, NULL} } }, + {"dmae", {{1, 1, dmae_int_bb_b0_regs, dmae_prty_bb_b0_regs} } }, + {"ptu", {{1, 1, ptu_int_bb_b0_regs, ptu_prty_bb_b0_regs} } }, + {"tcm", {{3, 2, tcm_int_bb_b0_regs, tcm_prty_bb_b0_regs} } }, + {"mcm", {{3, 2, mcm_int_bb_b0_regs, mcm_prty_bb_b0_regs} } }, + {"ucm", {{3, 2, ucm_int_bb_b0_regs, ucm_prty_bb_b0_regs} } }, + {"xcm", {{3, 2, xcm_int_bb_b0_regs, xcm_prty_bb_b0_regs} } }, + {"ycm", {{3, 2, ycm_int_bb_b0_regs, ycm_prty_bb_b0_regs} } }, + {"pcm", {{3, 1, pcm_int_bb_b0_regs, pcm_prty_bb_b0_regs} } }, + {"qm", {{1, 4, qm_int_bb_b0_regs, qm_prty_bb_b0_regs} } }, + {"tm", {{2, 1, tm_int_bb_b0_regs, tm_prty_bb_b0_regs} } }, + {"dorq", {{1, 2, dorq_int_bb_b0_regs, dorq_prty_bb_b0_regs} } }, + {"brb", {{12, 3, brb_int_bb_b0_regs, brb_prty_bb_b0_regs} } }, + {"src", {{1, 0, src_int_bb_b0_regs, NULL} } }, + {"prs", {{1, 3, prs_int_bb_b0_regs, prs_prty_bb_b0_regs} } }, + {"tsdm", {{1, 1, tsdm_int_bb_b0_regs, tsdm_prty_bb_b0_regs} } }, + {"msdm", {{1, 1, msdm_int_bb_b0_regs, msdm_prty_bb_b0_regs} } }, + {"usdm", {{1, 1, usdm_int_bb_b0_regs, usdm_prty_bb_b0_regs} } }, + {"xsdm", {{1, 1, xsdm_int_bb_b0_regs, xsdm_prty_bb_b0_regs} } }, + {"ysdm", {{1, 1, ysdm_int_bb_b0_regs, ysdm_prty_bb_b0_regs} } }, + {"psdm", {{1, 1, psdm_int_bb_b0_regs, psdm_prty_bb_b0_regs} } }, + {"tsem", {{3, 3, tsem_int_bb_b0_regs, tsem_prty_bb_b0_regs} } }, + {"msem", {{3, 2, msem_int_bb_b0_regs, msem_prty_bb_b0_regs} } }, + {"usem", {{3, 2, usem_int_bb_b0_regs, usem_prty_bb_b0_regs} } }, + {"xsem", {{3, 2, xsem_int_bb_b0_regs, xsem_prty_bb_b0_regs} } }, + {"ysem", {{3, 2, ysem_int_bb_b0_regs, ysem_prty_bb_b0_regs} } }, + {"psem", {{3, 3, psem_int_bb_b0_regs, psem_prty_bb_b0_regs} } }, + {"rss", {{1, 1, rss_int_bb_b0_regs, rss_prty_bb_b0_regs} } }, + {"tmld", {{1, 1, tmld_int_bb_b0_regs, tmld_prty_bb_b0_regs} } }, + {"muld", {{1, 1, muld_int_bb_b0_regs, muld_prty_bb_b0_regs} } }, + {"yuld", {{1, 1, yuld_int_bb_b0_regs, yuld_prty_bb_b0_regs} } }, + {"xyld", {{1, 1, xyld_int_bb_b0_regs, xyld_prty_bb_b0_regs} } }, + {"prm", {{1, 2, prm_int_bb_b0_regs, prm_prty_bb_b0_regs} } }, + {"pbf_pb1", {{1, 1, pbf_pb1_int_bb_b0_regs, + pbf_pb1_prty_bb_b0_regs} } }, + {"pbf_pb2", {{1, 1, pbf_pb2_int_bb_b0_regs, + pbf_pb2_prty_bb_b0_regs} } }, + {"rpb", { {1, 1, rpb_int_bb_b0_regs, rpb_prty_bb_b0_regs} } }, + {"btb", { {11, 2, btb_int_bb_b0_regs, btb_prty_bb_b0_regs} } }, + {"pbf", { {1, 3, 
pbf_int_bb_b0_regs, pbf_prty_bb_b0_regs} } }, + {"rdif", { {1, 1, rdif_int_bb_b0_regs, rdif_prty_bb_b0_regs} } }, + {"tdif", { {1, 2, tdif_int_bb_b0_regs, tdif_prty_bb_b0_regs} } }, + {"cdu", { {1, 1, cdu_int_bb_b0_regs, cdu_prty_bb_b0_regs} } }, + {"ccfc", { {1, 2, ccfc_int_bb_b0_regs, ccfc_prty_bb_b0_regs} } }, + {"tcfc", { {1, 2, tcfc_int_bb_b0_regs, tcfc_prty_bb_b0_regs} } }, + {"igu", { {1, 3, igu_int_bb_b0_regs, igu_prty_bb_b0_regs} } }, + {"cau", { {1, 1, cau_int_bb_b0_regs, cau_prty_bb_b0_regs} } }, + {"umac", { {0, 0, NULL, NULL} } }, + {"xmac", { {0, 0, NULL, NULL} } }, + {"dbg", { {1, 1, dbg_int_bb_b0_regs, dbg_prty_bb_b0_regs} } }, + {"nig", { {6, 5, nig_int_bb_b0_regs, nig_prty_bb_b0_regs} } }, + {"wol", { {0, 0, NULL, NULL} } }, + {"bmbn", { {0, 0, NULL, NULL} } }, + {"ipc", { {1, 1, ipc_int_bb_b0_regs, ipc_prty_bb_b0_regs} } }, + {"nwm", { {0, 0, NULL, NULL} } }, + {"nws", { {0, 0, NULL, NULL} } }, + {"ms", { {0, 0, NULL, NULL} } }, + {"phy_pcie", { {0, 0, NULL, NULL} } }, + {"misc_aeu", { {0, 0, NULL, NULL} } }, + {"bar0_map", { {0, 0, NULL, NULL} } },}; + /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { { { /* After Invert 1 */ {"GPIO0 function%d", - (32 << ATTENTION_LENGTH_SHIFT)}, + (32 << ATTENTION_LENGTH_SHIFT), MAX_BLOCK_ID}, } }, { { /* After Invert 2 */ - {"PGLUE config_space", ATTENTION_SINGLE}, - {"PGLUE misc_flr", ATTENTION_SINGLE}, - {"PGLUE B RBC", ATTENTION_PAR_INT}, - {"PGLUE misc_mctp", ATTENTION_SINGLE}, - {"Flash event", ATTENTION_SINGLE}, - {"SMB event", ATTENTION_SINGLE}, - {"Main Power", ATTENTION_SINGLE}, + {"PGLUE config_space", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"PGLUE misc_flr", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"PGLUE B RBC", ATTENTION_PAR_INT, BLOCK_PGLUE_B}, + {"PGLUE misc_mctp", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"Flash event", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"SMB event", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"Main Power", ATTENTION_SINGLE, MAX_BLOCK_ID}, {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | - (1 << ATTENTION_OFFSET_SHIFT)}, + (1 << ATTENTION_OFFSET_SHIFT), + MAX_BLOCK_ID}, {"PCIE glue/PXP VPD %d", - (16 << ATTENTION_LENGTH_SHIFT)}, + (16 << ATTENTION_LENGTH_SHIFT), BLOCK_PGLCS}, } }, { { /* After Invert 3 */ {"General Attention %d", - (32 << ATTENTION_LENGTH_SHIFT)}, + (32 << ATTENTION_LENGTH_SHIFT), MAX_BLOCK_ID}, } }, { { /* After Invert 4 */ - {"General Attention 32", ATTENTION_SINGLE}, + {"General Attention 32", ATTENTION_SINGLE, + MAX_BLOCK_ID}, {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | - (33 << ATTENTION_OFFSET_SHIFT)}, - {"General Attention 35", ATTENTION_SINGLE}, - {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT)}, - {"MCP CPU", ATTENTION_SINGLE}, - {"MCP Watchdog timer", ATTENTION_SINGLE}, - {"MCP M2P", ATTENTION_SINGLE}, - {"AVS stop status ready", ATTENTION_SINGLE}, - {"MSTAT", ATTENTION_PAR_INT}, - {"MSTAT per-path", ATTENTION_PAR_INT}, - {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT)}, - {"NIG", ATTENTION_PAR_INT}, - {"BMB/OPTE/MCP", ATTENTION_PAR_INT}, - {"BTB", ATTENTION_PAR_INT}, - {"BRB", ATTENTION_PAR_INT}, - {"PRS", ATTENTION_PAR_INT}, + (33 << ATTENTION_OFFSET_SHIFT), MAX_BLOCK_ID}, + {"General Attention 35", ATTENTION_SINGLE, + MAX_BLOCK_ID}, + {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), + BLOCK_CNIG}, + {"MCP CPU", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"MCP Watchdog timer", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"MCP M2P", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"AVS stop status ready", ATTENTION_SINGLE, + 
MAX_BLOCK_ID}, + {"MSTAT", ATTENTION_PAR_INT, MAX_BLOCK_ID}, + {"MSTAT per-path", ATTENTION_PAR_INT, MAX_BLOCK_ID}, + {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), + MAX_BLOCK_ID}, + {"NIG", ATTENTION_PAR_INT, BLOCK_NIG}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT, BLOCK_BMB}, + {"BTB", ATTENTION_PAR_INT, BLOCK_BTB}, + {"BRB", ATTENTION_PAR_INT, BLOCK_BRB}, + {"PRS", ATTENTION_PAR_INT, BLOCK_PRS}, } }, { { /* After Invert 5 */ - {"SRC", ATTENTION_PAR_INT}, - {"PB Client1", ATTENTION_PAR_INT}, - {"PB Client2", ATTENTION_PAR_INT}, - {"RPB", ATTENTION_PAR_INT}, - {"PBF", ATTENTION_PAR_INT}, - {"QM", ATTENTION_PAR_INT}, - {"TM", ATTENTION_PAR_INT}, - {"MCM", ATTENTION_PAR_INT}, - {"MSDM", ATTENTION_PAR_INT}, - {"MSEM", ATTENTION_PAR_INT}, - {"PCM", ATTENTION_PAR_INT}, - {"PSDM", ATTENTION_PAR_INT}, - {"PSEM", ATTENTION_PAR_INT}, - {"TCM", ATTENTION_PAR_INT}, - {"TSDM", ATTENTION_PAR_INT}, - {"TSEM", ATTENTION_PAR_INT}, + {"SRC", ATTENTION_PAR_INT, BLOCK_SRC}, + {"PB Client1", ATTENTION_PAR_INT, BLOCK_PBF_PB1}, + {"PB Client2", ATTENTION_PAR_INT, BLOCK_PBF_PB2}, + {"RPB", ATTENTION_PAR_INT, BLOCK_RPB}, + {"PBF", ATTENTION_PAR_INT, BLOCK_PBF}, + {"QM", ATTENTION_PAR_INT, BLOCK_QM}, + {"TM", ATTENTION_PAR_INT, BLOCK_TM}, + {"MCM", ATTENTION_PAR_INT, BLOCK_MCM}, + {"MSDM", ATTENTION_PAR_INT, BLOCK_MSDM}, + {"MSEM", ATTENTION_PAR_INT, BLOCK_MSEM}, + {"PCM", ATTENTION_PAR_INT, BLOCK_PCM}, + {"PSDM", ATTENTION_PAR_INT, BLOCK_PSDM}, + {"PSEM", ATTENTION_PAR_INT, BLOCK_PSEM}, + {"TCM", ATTENTION_PAR_INT, BLOCK_TCM}, + {"TSDM", ATTENTION_PAR_INT, BLOCK_TSDM}, + {"TSEM", ATTENTION_PAR_INT, BLOCK_TSEM}, } }, { { /* After Invert 6 */ - {"UCM", ATTENTION_PAR_INT}, - {"USDM", ATTENTION_PAR_INT}, - {"USEM", ATTENTION_PAR_INT}, - {"XCM", ATTENTION_PAR_INT}, - {"XSDM", ATTENTION_PAR_INT}, - {"XSEM", ATTENTION_PAR_INT}, - {"YCM", ATTENTION_PAR_INT}, - {"YSDM", ATTENTION_PAR_INT}, - {"YSEM", ATTENTION_PAR_INT}, - {"XYLD", ATTENTION_PAR_INT}, - {"TMLD", ATTENTION_PAR_INT}, - {"MYLD", ATTENTION_PAR_INT}, - {"YULD", ATTENTION_PAR_INT}, - {"DORQ", ATTENTION_PAR_INT}, - {"DBG", ATTENTION_PAR_INT}, - {"IPC", ATTENTION_PAR_INT}, + {"UCM", ATTENTION_PAR_INT, BLOCK_UCM}, + {"USDM", ATTENTION_PAR_INT, BLOCK_USDM}, + {"USEM", ATTENTION_PAR_INT, BLOCK_USEM}, + {"XCM", ATTENTION_PAR_INT, BLOCK_XCM}, + {"XSDM", ATTENTION_PAR_INT, BLOCK_XSDM}, + {"XSEM", ATTENTION_PAR_INT, BLOCK_XSEM}, + {"YCM", ATTENTION_PAR_INT, BLOCK_YCM}, + {"YSDM", ATTENTION_PAR_INT, BLOCK_YSDM}, + {"YSEM", ATTENTION_PAR_INT, BLOCK_YSEM}, + {"XYLD", ATTENTION_PAR_INT, BLOCK_XYLD}, + {"TMLD", ATTENTION_PAR_INT, BLOCK_TMLD}, + {"MYLD", ATTENTION_PAR_INT, BLOCK_MULD}, + {"YULD", ATTENTION_PAR_INT, BLOCK_YULD}, + {"DORQ", ATTENTION_PAR_INT, BLOCK_DORQ}, + {"DBG", ATTENTION_PAR_INT, BLOCK_DBG}, + {"IPC", ATTENTION_PAR_INT, BLOCK_IPC}, } }, { { /* After Invert 7 */ - {"CCFC", ATTENTION_PAR_INT}, - {"CDU", ATTENTION_PAR_INT}, - {"DMAE", ATTENTION_PAR_INT}, - {"IGU", ATTENTION_PAR_INT}, - {"ATC", ATTENTION_PAR_INT}, - {"CAU", ATTENTION_PAR_INT}, - {"PTU", ATTENTION_PAR_INT}, - {"PRM", ATTENTION_PAR_INT}, - {"TCFC", ATTENTION_PAR_INT}, - {"RDIF", ATTENTION_PAR_INT}, - {"TDIF", ATTENTION_PAR_INT}, - {"RSS", ATTENTION_PAR_INT}, - {"MISC", ATTENTION_PAR_INT}, - {"MISCS", ATTENTION_PAR_INT}, - {"PCIE", ATTENTION_PAR}, - {"Vaux PCI core", ATTENTION_SINGLE}, - {"PSWRQ", ATTENTION_PAR_INT}, + {"CCFC", ATTENTION_PAR_INT, BLOCK_CCFC}, + {"CDU", ATTENTION_PAR_INT, BLOCK_CDU}, + {"DMAE", ATTENTION_PAR_INT, BLOCK_DMAE}, + {"IGU", ATTENTION_PAR_INT, BLOCK_IGU}, + {"ATC", 
ATTENTION_PAR_INT, MAX_BLOCK_ID}, + {"CAU", ATTENTION_PAR_INT, BLOCK_CAU}, + {"PTU", ATTENTION_PAR_INT, BLOCK_PTU}, + {"PRM", ATTENTION_PAR_INT, BLOCK_PRM}, + {"TCFC", ATTENTION_PAR_INT, BLOCK_TCFC}, + {"RDIF", ATTENTION_PAR_INT, BLOCK_RDIF}, + {"TDIF", ATTENTION_PAR_INT, BLOCK_TDIF}, + {"RSS", ATTENTION_PAR_INT, BLOCK_RSS}, + {"MISC", ATTENTION_PAR_INT, BLOCK_MISC}, + {"MISCS", ATTENTION_PAR_INT, BLOCK_MISCS}, + {"PCIE", ATTENTION_PAR, BLOCK_PCIE}, + {"Vaux PCI core", ATTENTION_SINGLE, BLOCK_PGLCS}, + {"PSWRQ", ATTENTION_PAR_INT, BLOCK_PSWRQ}, } }, { { /* After Invert 8 */ - {"PSWRQ (pci_clk)", ATTENTION_PAR_INT}, - {"PSWWR", ATTENTION_PAR_INT}, - {"PSWWR (pci_clk)", ATTENTION_PAR_INT}, - {"PSWRD", ATTENTION_PAR_INT}, - {"PSWRD (pci_clk)", ATTENTION_PAR_INT}, - {"PSWHST", ATTENTION_PAR_INT}, - {"PSWHST (pci_clk)", ATTENTION_PAR_INT}, - {"GRC", ATTENTION_PAR_INT}, - {"CPMU", ATTENTION_PAR_INT}, - {"NCSI", ATTENTION_PAR_INT}, - {"MSEM PRAM", ATTENTION_PAR}, - {"PSEM PRAM", ATTENTION_PAR}, - {"TSEM PRAM", ATTENTION_PAR}, - {"USEM PRAM", ATTENTION_PAR}, - {"XSEM PRAM", ATTENTION_PAR}, - {"YSEM PRAM", ATTENTION_PAR}, - {"pxp_misc_mps", ATTENTION_PAR}, - {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE}, - {"PERST_B assertion", ATTENTION_SINGLE}, - {"PERST_B deassertion", ATTENTION_SINGLE}, - {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT)}, + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWRQ2}, + {"PSWWR", ATTENTION_PAR_INT, BLOCK_PSWWR}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWWR2}, + {"PSWRD", ATTENTION_PAR_INT, BLOCK_PSWRD}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWRD2}, + {"PSWHST", ATTENTION_PAR_INT, BLOCK_PSWHST}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWHST2}, + {"GRC", ATTENTION_PAR_INT, BLOCK_GRC}, + {"CPMU", ATTENTION_PAR_INT, BLOCK_CPMU}, + {"NCSI", ATTENTION_PAR_INT, BLOCK_NCSI}, + {"MSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, + {"PSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, + {"TSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, + {"USEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, + {"XSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, + {"YSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, + {"pxp_misc_mps", ATTENTION_PAR, BLOCK_PGLCS}, + {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, + BLOCK_PGLCS}, + {"PERST_B assertion", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"PERST_B deassertion", ATTENTION_SINGLE, + MAX_BLOCK_ID}, + {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), + MAX_BLOCK_ID}, } }, { { /* After Invert 9 */ - {"MCP Latched memory", ATTENTION_PAR}, - {"MCP Latched scratchpad cache", ATTENTION_SINGLE}, - {"MCP Latched ump_tx", ATTENTION_PAR}, - {"MCP Latched scratchpad", ATTENTION_PAR}, - {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT)}, + {"MCP Latched memory", ATTENTION_PAR, MAX_BLOCK_ID}, + {"MCP Latched scratchpad cache", ATTENTION_SINGLE, + MAX_BLOCK_ID}, + {"MCP Latched ump_tx", ATTENTION_PAR, MAX_BLOCK_ID}, + {"MCP Latched scratchpad", ATTENTION_PAR, + MAX_BLOCK_ID}, + {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), + MAX_BLOCK_ID}, } }, }; @@ -316,6 +1544,28 @@ static int qed_int_assertion(struct qed_hwfn *p_hwfn, return 0; } +static void qed_int_deassertion_print_bit(struct qed_hwfn *p_hwfn, + struct attn_hw_reg *p_reg_desc, + struct attn_hw_block *p_block, + enum qed_attention_type type, + u32 val, u32 mask) +{ + int j; + + for (j = 0; j < p_reg_desc->num_of_bits; j++) { + if (!(val & (1 << j))) + continue; + + DP_NOTICE(p_hwfn, + "%s (%s): reg %d [0x%08x], bit %d [%s]\n", + p_block->name, + type == QED_ATTN_TYPE_ATTN ? 
"Interrupt" : + "Parity", + p_reg_desc->reg_idx, p_reg_desc->sts_addr, + j, (mask & (1 << j)) ? " [MASKED]" : ""); + } +} + /** * @brief qed_int_deassertion_aeu_bit - handles the effects of a single * cause of the attention @@ -340,6 +1590,31 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", p_aeu->bit_name, bitmask); + /* Handle HW block interrupt registers */ + if (p_aeu->block_index != MAX_BLOCK_ID) { + struct attn_hw_block *p_block; + int i; + + p_block = &attn_blocks[p_aeu->block_index]; + + /* Handle each interrupt register */ + for (i = 0; i < p_block->chip_regs[0].num_of_int_regs; i++) { + struct attn_hw_reg *p_reg_desc; + u32 sts_addr; + + p_reg_desc = p_block->chip_regs[0].int_regs[i]; + sts_addr = p_reg_desc->sts_addr; + + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr); + mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->mask_addr); + qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, + p_block, + QED_ATTN_TYPE_ATTN, + val, mask); + } + } + /* Prevent this Attention from being asserted in the future */ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); @@ -349,6 +1624,62 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, return rc; } +static void qed_int_parity_print(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + struct attn_hw_block *p_block, + u8 bit_index) +{ + int i; + + for (i = 0; i < p_block->chip_regs[0].num_of_prty_regs; i++) { + struct attn_hw_reg *p_reg_desc; + u32 val, mask; + + p_reg_desc = p_block->chip_regs[0].prty_regs[i]; + + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->sts_clr_addr); + mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->mask_addr); + qed_int_deassertion_print_bit(p_hwfn, p_reg_desc, + p_block, + QED_ATTN_TYPE_PARITY, + val, mask); + } +} + +/** + * @brief qed_int_deassertion_parity - handle a single parity AEU source + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the parity + * @param bit_index + */ +static void qed_int_deassertion_parity(struct qed_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u8 bit_index) +{ + u32 block_id = p_aeu->block_index; + + DP_INFO(p_hwfn->cdev, "%s[%d] parity attention is set\n", + p_aeu->bit_name, bit_index); + + if (block_id != MAX_BLOCK_ID) { + qed_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id], + bit_index); + + /* In BB, there's a single parity bit for several blocks */ + if (block_id == BLOCK_BTB) { + qed_int_parity_print(p_hwfn, p_aeu, + &attn_blocks[BLOCK_OPTE], + bit_index); + qed_int_parity_print(p_hwfn, p_aeu, + &attn_blocks[BLOCK_MCP], + bit_index); + } + } +} + /** * @brief - handles deassertion of previously asserted attentions. * @@ -392,11 +1723,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn, struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; if ((p_bit->flags & ATTENTION_PARITY) && - !!(parities & (1 << bit_idx))) { - DP_INFO(p_hwfn, - "%s[%d] parity attention is set\n", - p_bit->bit_name, bit_idx); - } + !!(parities & (1 << bit_idx))) + qed_int_deassertion_parity(p_hwfn, p_bit, + bit_idx); bit_idx += ATTENTION_LENGTH(p_bit->flags); } -- cgit v1.2.3 From b4149dc7e11e8fe58773ecb8862c5217ed205ad9 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 28 Feb 2016 12:26:55 +0200 Subject: qed: Print additional HW attention info This patch utilizes the attention infrastructure to log additional information that relates only to specific HW blocks. 
For some of those HW blocks, it also stops automatically disabling the attention generation as the attention is considered benign and thus should only be logged; No fear of it flooding the system. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_int.c | 526 ++++++++++++++++++++----- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 58 +++ 2 files changed, 479 insertions(+), 105 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index c8bca7776b43..ffd0accc2ec9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -66,6 +66,9 @@ struct aeu_invert_reg_bit { #define ATTENTION_OFFSET_SHIFT (12) unsigned int flags; + /* Callback to call if attention will be triggered */ + int (*cb)(struct qed_hwfn *p_hwfn); + enum block_id block_index; }; @@ -1285,170 +1288,463 @@ static struct attn_hw_block attn_blocks[] = { {"misc_aeu", { {0, 0, NULL, NULL} } }, {"bar0_map", { {0, 0, NULL, NULL} } },}; +/* Specific HW attention callbacks */ +static int qed_mcp_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); + + /* This might occur on certain instances; Log it once then mask it */ + DP_INFO(p_hwfn->cdev, "MCP_REG_CPU_STATE: %08x - Masking...\n", + tmp); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, + 0xffffffff); + + return 0; +} + +#define QED_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) +#define ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) +#define ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) +#define ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0xf) +#define ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) +#define ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x1) +#define ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) +#define ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0xff) +#define ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) +#define ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0xf) +#define ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) +#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0xff) +#define ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) +static int qed_pswhst_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_VALID); + + if (tmp & QED_PSWHST_ATTENTION_INCORRECT_ACCESS) { + u32 addr, data, length; + + addr = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_ADDRESS); + data = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_DATA); + length = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_LENGTH); + + DP_INFO(p_hwfn->cdev, + "Incorrect access to %08x of length %08x - PF [%02x] VF [%04x] [valid %02x] client [%02x] write [%02x] Byte-Enable [%04x] [%08x]\n", + addr, length, + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_PF_ID), + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_VF_ID), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_VF_VALID), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_CLIENT), + (u8) GET_FIELD(data, ATTENTION_INCORRECT_ACCESS_WR), + (u8) GET_FIELD(data, + ATTENTION_INCORRECT_ACCESS_BYTE_EN), + data); + } + + return 0; +} + +#define QED_GRC_ATTENTION_VALID_BIT (1 << 0) +#define QED_GRC_ATTENTION_ADDRESS_MASK (0x7fffff) +#define QED_GRC_ATTENTION_ADDRESS_SHIFT (0) +#define QED_GRC_ATTENTION_RDWR_BIT (1 << 23) +#define QED_GRC_ATTENTION_MASTER_MASK (0xf) +#define QED_GRC_ATTENTION_MASTER_SHIFT (24) +#define QED_GRC_ATTENTION_PF_MASK (0xf) +#define 
QED_GRC_ATTENTION_PF_SHIFT (0) +#define QED_GRC_ATTENTION_VF_MASK (0xff) +#define QED_GRC_ATTENTION_VF_SHIFT (4) +#define QED_GRC_ATTENTION_PRIV_MASK (0x3) +#define QED_GRC_ATTENTION_PRIV_SHIFT (14) +#define QED_GRC_ATTENTION_PRIV_VF (0) +static const char *attn_master_to_str(u8 master) +{ + switch (master) { + case 1: return "PXP"; + case 2: return "MCP"; + case 3: return "MSDM"; + case 4: return "PSDM"; + case 5: return "YSDM"; + case 6: return "USDM"; + case 7: return "TSDM"; + case 8: return "XSDM"; + case 9: return "DBU"; + case 10: return "DMAE"; + default: + return "Unkown"; + } +} + +static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp, tmp2; + + /* We've already cleared the timeout interrupt register, so we learn + * of interrupts via the validity register + */ + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); + if (!(tmp & QED_GRC_ATTENTION_VALID_BIT)) + goto out; + + /* Read the GRC timeout information */ + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); + tmp2 = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); + + DP_INFO(p_hwfn->cdev, + "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n", + tmp2, tmp, + (tmp & QED_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", + GET_FIELD(tmp, QED_GRC_ATTENTION_ADDRESS) << 2, + attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), + GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), + (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == + QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)", + GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); + +out: + /* Regardles of anything else, clean the validity bit */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); + return 0; +} + +#define PGLUE_ATTENTION_VALID (1 << 29) +#define PGLUE_ATTENTION_RD_VALID (1 << 26) +#define PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf) +#define PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) +#define PGLUE_ATTENTION_DETAILS_VF_VALID_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS_VF_VALID_SHIFT (19) +#define PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff) +#define PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) +#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_WAS_ERR_SHIFT (21) +#define PGLUE_ATTENTION_DETAILS2_BME_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_BME_SHIFT (22) +#define PGLUE_ATTENTION_DETAILS2_FID_EN_MASK (0x1) +#define PGLUE_ATTENTION_DETAILS2_FID_EN_SHIFT (23) +#define PGLUE_ATTENTION_ICPL_VALID (1 << 23) +#define PGLUE_ATTENTION_ZLR_VALID (1 << 25) +#define PGLUE_ATTENTION_ILT_VALID (1 << 23) +static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 tmp; + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS2); + if (tmp & PGLUE_ATTENTION_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_63_32); + details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS); + + DP_INFO(p_hwfn, + "Illegal write by chip to [%08x:%08x] blocked.\n" + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 
1 : 0, + tmp, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, + PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0); + } + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS2); + if (tmp & PGLUE_ATTENTION_RD_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_63_32); + details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS); + + DP_INFO(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked.\n" + " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n" + " Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID), + (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID), + GET_FIELD(details, + PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0, + tmp, + GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 + : 0, + GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0, + GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 + : 0); + } + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + if (tmp & PGLUE_ATTENTION_ICPL_VALID) + DP_INFO(p_hwfn, "ICPL eror - %08x\n", tmp); + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + if (tmp & PGLUE_ATTENTION_ZLR_VALID) { + u32 addr_hi, addr_lo; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); + + DP_INFO(p_hwfn, "ZLR eror - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); + } + + tmp = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + if (tmp & PGLUE_ATTENTION_ILT_VALID) { + u32 addr_hi, addr_lo, details; + + addr_lo = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); + addr_hi = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); + details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS); + + DP_INFO(p_hwfn, + "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); + } + + /* Clear the indications */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + + return 0; +} + +#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) +static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) +{ + u32 reason; + + reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & + QED_DORQ_ATTENTION_REASON_MASK; + if (reason) { + u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + DORQ_REG_DB_DROP_DETAILS); + + DP_INFO(p_hwfn->cdev, + "DORQ db_drop: adress 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", + qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS), + (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), + GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, + reason); + } + + return -EINVAL; +} + /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { { { /* After Invert 1 */ {"GPIO0 function%d", - (32 << ATTENTION_LENGTH_SHIFT), MAX_BLOCK_ID}, + (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, } }, { { /* After Invert 2 
*/ - {"PGLUE config_space", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"PGLUE misc_flr", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"PGLUE B RBC", ATTENTION_PAR_INT, BLOCK_PGLUE_B}, - {"PGLUE misc_mctp", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"Flash event", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"SMB event", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"Main Power", ATTENTION_SINGLE, MAX_BLOCK_ID}, + {"PGLUE config_space", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PGLUE misc_flr", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"PGLUE B RBC", ATTENTION_PAR_INT, + qed_pglub_rbc_attn_cb, BLOCK_PGLUE_B}, + {"PGLUE misc_mctp", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"Flash event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"SMB event", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, + {"Main Power", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"SW timers #%d", (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, {"PCIE glue/PXP VPD %d", - (16 << ATTENTION_LENGTH_SHIFT), BLOCK_PGLCS}, + (16 << ATTENTION_LENGTH_SHIFT), NULL, BLOCK_PGLCS}, } }, { { /* After Invert 3 */ {"General Attention %d", - (32 << ATTENTION_LENGTH_SHIFT), MAX_BLOCK_ID}, + (32 << ATTENTION_LENGTH_SHIFT), NULL, MAX_BLOCK_ID}, } }, { { /* After Invert 4 */ {"General Attention 32", ATTENTION_SINGLE, - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, {"General Attention %d", (2 << ATTENTION_LENGTH_SHIFT) | - (33 << ATTENTION_OFFSET_SHIFT), MAX_BLOCK_ID}, + (33 << ATTENTION_OFFSET_SHIFT), NULL, MAX_BLOCK_ID}, {"General Attention 35", ATTENTION_SINGLE, - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), - BLOCK_CNIG}, - {"MCP CPU", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"MCP Watchdog timer", ATTENTION_SINGLE, MAX_BLOCK_ID}, - {"MCP M2P", ATTENTION_SINGLE, MAX_BLOCK_ID}, + NULL, BLOCK_CNIG}, + {"MCP CPU", ATTENTION_SINGLE, + qed_mcp_attn_cb, MAX_BLOCK_ID}, + {"MCP Watchdog timer", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, + {"MCP M2P", ATTENTION_SINGLE, NULL, MAX_BLOCK_ID}, {"AVS stop status ready", ATTENTION_SINGLE, - MAX_BLOCK_ID}, - {"MSTAT", ATTENTION_PAR_INT, MAX_BLOCK_ID}, - {"MSTAT per-path", ATTENTION_PAR_INT, MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, + {"MSTAT", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, + {"MSTAT per-path", ATTENTION_PAR_INT, + NULL, MAX_BLOCK_ID}, {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), - MAX_BLOCK_ID}, - {"NIG", ATTENTION_PAR_INT, BLOCK_NIG}, - {"BMB/OPTE/MCP", ATTENTION_PAR_INT, BLOCK_BMB}, - {"BTB", ATTENTION_PAR_INT, BLOCK_BTB}, - {"BRB", ATTENTION_PAR_INT, BLOCK_BRB}, - {"PRS", ATTENTION_PAR_INT, BLOCK_PRS}, + NULL, MAX_BLOCK_ID}, + {"NIG", ATTENTION_PAR_INT, NULL, BLOCK_NIG}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT, NULL, BLOCK_BMB}, + {"BTB", ATTENTION_PAR_INT, NULL, BLOCK_BTB}, + {"BRB", ATTENTION_PAR_INT, NULL, BLOCK_BRB}, + {"PRS", ATTENTION_PAR_INT, NULL, BLOCK_PRS}, } }, { { /* After Invert 5 */ - {"SRC", ATTENTION_PAR_INT, BLOCK_SRC}, - {"PB Client1", ATTENTION_PAR_INT, BLOCK_PBF_PB1}, - {"PB Client2", ATTENTION_PAR_INT, BLOCK_PBF_PB2}, - {"RPB", ATTENTION_PAR_INT, BLOCK_RPB}, - {"PBF", ATTENTION_PAR_INT, BLOCK_PBF}, - {"QM", ATTENTION_PAR_INT, BLOCK_QM}, - {"TM", ATTENTION_PAR_INT, BLOCK_TM}, - {"MCM", ATTENTION_PAR_INT, BLOCK_MCM}, - {"MSDM", ATTENTION_PAR_INT, BLOCK_MSDM}, - {"MSEM", ATTENTION_PAR_INT, BLOCK_MSEM}, - {"PCM", ATTENTION_PAR_INT, BLOCK_PCM}, - {"PSDM", ATTENTION_PAR_INT, BLOCK_PSDM}, - {"PSEM", ATTENTION_PAR_INT, BLOCK_PSEM}, - {"TCM", ATTENTION_PAR_INT, BLOCK_TCM}, - {"TSDM", ATTENTION_PAR_INT, BLOCK_TSDM}, - {"TSEM", ATTENTION_PAR_INT, 
BLOCK_TSEM}, + {"SRC", ATTENTION_PAR_INT, NULL, BLOCK_SRC}, + {"PB Client1", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB1}, + {"PB Client2", ATTENTION_PAR_INT, NULL, BLOCK_PBF_PB2}, + {"RPB", ATTENTION_PAR_INT, NULL, BLOCK_RPB}, + {"PBF", ATTENTION_PAR_INT, NULL, BLOCK_PBF}, + {"QM", ATTENTION_PAR_INT, NULL, BLOCK_QM}, + {"TM", ATTENTION_PAR_INT, NULL, BLOCK_TM}, + {"MCM", ATTENTION_PAR_INT, NULL, BLOCK_MCM}, + {"MSDM", ATTENTION_PAR_INT, NULL, BLOCK_MSDM}, + {"MSEM", ATTENTION_PAR_INT, NULL, BLOCK_MSEM}, + {"PCM", ATTENTION_PAR_INT, NULL, BLOCK_PCM}, + {"PSDM", ATTENTION_PAR_INT, NULL, BLOCK_PSDM}, + {"PSEM", ATTENTION_PAR_INT, NULL, BLOCK_PSEM}, + {"TCM", ATTENTION_PAR_INT, NULL, BLOCK_TCM}, + {"TSDM", ATTENTION_PAR_INT, NULL, BLOCK_TSDM}, + {"TSEM", ATTENTION_PAR_INT, NULL, BLOCK_TSEM}, } }, { { /* After Invert 6 */ - {"UCM", ATTENTION_PAR_INT, BLOCK_UCM}, - {"USDM", ATTENTION_PAR_INT, BLOCK_USDM}, - {"USEM", ATTENTION_PAR_INT, BLOCK_USEM}, - {"XCM", ATTENTION_PAR_INT, BLOCK_XCM}, - {"XSDM", ATTENTION_PAR_INT, BLOCK_XSDM}, - {"XSEM", ATTENTION_PAR_INT, BLOCK_XSEM}, - {"YCM", ATTENTION_PAR_INT, BLOCK_YCM}, - {"YSDM", ATTENTION_PAR_INT, BLOCK_YSDM}, - {"YSEM", ATTENTION_PAR_INT, BLOCK_YSEM}, - {"XYLD", ATTENTION_PAR_INT, BLOCK_XYLD}, - {"TMLD", ATTENTION_PAR_INT, BLOCK_TMLD}, - {"MYLD", ATTENTION_PAR_INT, BLOCK_MULD}, - {"YULD", ATTENTION_PAR_INT, BLOCK_YULD}, - {"DORQ", ATTENTION_PAR_INT, BLOCK_DORQ}, - {"DBG", ATTENTION_PAR_INT, BLOCK_DBG}, - {"IPC", ATTENTION_PAR_INT, BLOCK_IPC}, + {"UCM", ATTENTION_PAR_INT, NULL, BLOCK_UCM}, + {"USDM", ATTENTION_PAR_INT, NULL, BLOCK_USDM}, + {"USEM", ATTENTION_PAR_INT, NULL, BLOCK_USEM}, + {"XCM", ATTENTION_PAR_INT, NULL, BLOCK_XCM}, + {"XSDM", ATTENTION_PAR_INT, NULL, BLOCK_XSDM}, + {"XSEM", ATTENTION_PAR_INT, NULL, BLOCK_XSEM}, + {"YCM", ATTENTION_PAR_INT, NULL, BLOCK_YCM}, + {"YSDM", ATTENTION_PAR_INT, NULL, BLOCK_YSDM}, + {"YSEM", ATTENTION_PAR_INT, NULL, BLOCK_YSEM}, + {"XYLD", ATTENTION_PAR_INT, NULL, BLOCK_XYLD}, + {"TMLD", ATTENTION_PAR_INT, NULL, BLOCK_TMLD}, + {"MYLD", ATTENTION_PAR_INT, NULL, BLOCK_MULD}, + {"YULD", ATTENTION_PAR_INT, NULL, BLOCK_YULD}, + {"DORQ", ATTENTION_PAR_INT, + qed_dorq_attn_cb, BLOCK_DORQ}, + {"DBG", ATTENTION_PAR_INT, NULL, BLOCK_DBG}, + {"IPC", ATTENTION_PAR_INT, NULL, BLOCK_IPC}, } }, { { /* After Invert 7 */ - {"CCFC", ATTENTION_PAR_INT, BLOCK_CCFC}, - {"CDU", ATTENTION_PAR_INT, BLOCK_CDU}, - {"DMAE", ATTENTION_PAR_INT, BLOCK_DMAE}, - {"IGU", ATTENTION_PAR_INT, BLOCK_IGU}, - {"ATC", ATTENTION_PAR_INT, MAX_BLOCK_ID}, - {"CAU", ATTENTION_PAR_INT, BLOCK_CAU}, - {"PTU", ATTENTION_PAR_INT, BLOCK_PTU}, - {"PRM", ATTENTION_PAR_INT, BLOCK_PRM}, - {"TCFC", ATTENTION_PAR_INT, BLOCK_TCFC}, - {"RDIF", ATTENTION_PAR_INT, BLOCK_RDIF}, - {"TDIF", ATTENTION_PAR_INT, BLOCK_TDIF}, - {"RSS", ATTENTION_PAR_INT, BLOCK_RSS}, - {"MISC", ATTENTION_PAR_INT, BLOCK_MISC}, - {"MISCS", ATTENTION_PAR_INT, BLOCK_MISCS}, - {"PCIE", ATTENTION_PAR, BLOCK_PCIE}, - {"Vaux PCI core", ATTENTION_SINGLE, BLOCK_PGLCS}, - {"PSWRQ", ATTENTION_PAR_INT, BLOCK_PSWRQ}, + {"CCFC", ATTENTION_PAR_INT, NULL, BLOCK_CCFC}, + {"CDU", ATTENTION_PAR_INT, NULL, BLOCK_CDU}, + {"DMAE", ATTENTION_PAR_INT, NULL, BLOCK_DMAE}, + {"IGU", ATTENTION_PAR_INT, NULL, BLOCK_IGU}, + {"ATC", ATTENTION_PAR_INT, NULL, MAX_BLOCK_ID}, + {"CAU", ATTENTION_PAR_INT, NULL, BLOCK_CAU}, + {"PTU", ATTENTION_PAR_INT, NULL, BLOCK_PTU}, + {"PRM", ATTENTION_PAR_INT, NULL, BLOCK_PRM}, + {"TCFC", ATTENTION_PAR_INT, NULL, BLOCK_TCFC}, + {"RDIF", ATTENTION_PAR_INT, NULL, BLOCK_RDIF}, + {"TDIF", 
ATTENTION_PAR_INT, NULL, BLOCK_TDIF}, + {"RSS", ATTENTION_PAR_INT, NULL, BLOCK_RSS}, + {"MISC", ATTENTION_PAR_INT, NULL, BLOCK_MISC}, + {"MISCS", ATTENTION_PAR_INT, NULL, BLOCK_MISCS}, + {"PCIE", ATTENTION_PAR, NULL, BLOCK_PCIE}, + {"Vaux PCI core", ATTENTION_SINGLE, NULL, BLOCK_PGLCS}, + {"PSWRQ", ATTENTION_PAR_INT, NULL, BLOCK_PSWRQ}, } }, { { /* After Invert 8 */ - {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWRQ2}, - {"PSWWR", ATTENTION_PAR_INT, BLOCK_PSWWR}, - {"PSWWR (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWWR2}, - {"PSWRD", ATTENTION_PAR_INT, BLOCK_PSWRD}, - {"PSWRD (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWRD2}, - {"PSWHST", ATTENTION_PAR_INT, BLOCK_PSWHST}, - {"PSWHST (pci_clk)", ATTENTION_PAR_INT, BLOCK_PSWHST2}, - {"GRC", ATTENTION_PAR_INT, BLOCK_GRC}, - {"CPMU", ATTENTION_PAR_INT, BLOCK_CPMU}, - {"NCSI", ATTENTION_PAR_INT, BLOCK_NCSI}, - {"MSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, - {"PSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, - {"TSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, - {"USEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, - {"XSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, - {"YSEM PRAM", ATTENTION_PAR, MAX_BLOCK_ID}, - {"pxp_misc_mps", ATTENTION_PAR, BLOCK_PGLCS}, + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWRQ2}, + {"PSWWR", ATTENTION_PAR_INT, NULL, BLOCK_PSWWR}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWWR2}, + {"PSWRD", ATTENTION_PAR_INT, NULL, BLOCK_PSWRD}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWRD2}, + {"PSWHST", ATTENTION_PAR_INT, + qed_pswhst_attn_cb, BLOCK_PSWHST}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT, + NULL, BLOCK_PSWHST2}, + {"GRC", ATTENTION_PAR_INT, + qed_grc_attn_cb, BLOCK_GRC}, + {"CPMU", ATTENTION_PAR_INT, NULL, BLOCK_CPMU}, + {"NCSI", ATTENTION_PAR_INT, NULL, BLOCK_NCSI}, + {"MSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"PSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"TSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"USEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"XSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"YSEM PRAM", ATTENTION_PAR, NULL, MAX_BLOCK_ID}, + {"pxp_misc_mps", ATTENTION_PAR, NULL, BLOCK_PGLCS}, {"PCIE glue/PXP Exp. 
ROM", ATTENTION_SINGLE, - BLOCK_PGLCS}, - {"PERST_B assertion", ATTENTION_SINGLE, MAX_BLOCK_ID}, + NULL, BLOCK_PGLCS}, + {"PERST_B assertion", ATTENTION_SINGLE, + NULL, MAX_BLOCK_ID}, {"PERST_B deassertion", ATTENTION_SINGLE, - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, } }, { { /* After Invert 9 */ - {"MCP Latched memory", ATTENTION_PAR, MAX_BLOCK_ID}, + {"MCP Latched memory", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, {"MCP Latched scratchpad cache", ATTENTION_SINGLE, - MAX_BLOCK_ID}, - {"MCP Latched ump_tx", ATTENTION_PAR, MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, + {"MCP Latched ump_tx", ATTENTION_PAR, + NULL, MAX_BLOCK_ID}, {"MCP Latched scratchpad", ATTENTION_PAR, - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), - MAX_BLOCK_ID}, + NULL, MAX_BLOCK_ID}, } }, }; @@ -1585,14 +1881,22 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, u32 bitmask) { int rc = -EINVAL; - u32 val, mask = ~bitmask; + u32 val; DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", p_aeu->bit_name, bitmask); + /* Call callback before clearing the interrupt status */ + if (p_aeu->cb) { + DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", + p_aeu->bit_name); + rc = p_aeu->cb(p_hwfn); + } + /* Handle HW block interrupt registers */ if (p_aeu->block_index != MAX_BLOCK_ID) { struct attn_hw_block *p_block; + u32 mask; int i; p_block = &attn_blocks[p_aeu->block_index]; @@ -1603,7 +1907,14 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, u32 sts_addr; p_reg_desc = p_block->chip_regs[0].int_regs[i]; - sts_addr = p_reg_desc->sts_addr; + + /* In case of fatal attention, don't clear the status + * so it would appear in following idle check. + */ + if (rc == 0) + sts_addr = p_reg_desc->sts_clr_addr; + else + sts_addr = p_reg_desc->sts_addr; val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr); mask = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, @@ -1615,12 +1926,17 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, } } + /* If the attention is benign, no need to prevent it */ + if (!rc) + goto out; + /* Prevent this Attention from being asserted in the future */ val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); - qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & ~bitmask)); DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", p_aeu->bit_name); +out: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 2286e75642e9..c15b1622e636 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -127,8 +127,20 @@ 0x00c000UL #define DORQ_REG_IFEN \ 0x100040UL +#define DORQ_REG_DB_DROP_REASON \ + 0x100a2cUL +#define DORQ_REG_DB_DROP_DETAILS \ + 0x100a24UL +#define DORQ_REG_DB_DROP_DETAILS_ADDRESS \ + 0x100a1cUL #define GRC_REG_TIMEOUT_EN \ 0x050404UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID \ + 0x050054UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 \ + 0x05004cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 \ + 0x050050UL #define IGU_REG_BLOCK_CONFIGURATION \ 0x180040UL #define MCM_REG_INIT \ @@ -155,6 +167,40 @@ 0x1100000UL #define PGLUE_B_REG_ADMIN_PER_PF_REGION \ 0x2a9000UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 \ + 0x2aa150UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 \ + 0x2aa144UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 \ + 0x2aa148UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS \ + 0x2aa14cUL +#define 
PGLUE_B_REG_TX_ERR_RD_ADD_31_0 \ + 0x2aa154UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 \ + 0x2aa158UL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS \ + 0x2aa15cUL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 \ + 0x2aa160UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL \ + 0x2aa164UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS \ + 0x2aa54cUL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 \ + 0x2aa544UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 \ + 0x2aa548UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 \ + 0x2aae74UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 \ + 0x2aae78UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS \ + 0x2aae7cUL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 \ + 0x2aae80UL +#define PGLUE_B_REG_LATCHED_ERRORS_CLR \ + 0x2aa3bcUL #define PRM_REG_DISABLE_PRM \ 0x230000UL #define PRS_REG_SOFT_RST \ @@ -171,6 +217,14 @@ 0x2a0040UL #define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ 0x29e050UL +#define PSWHST_REG_INCORRECT_ACCESS_VALID \ + 0x2a0070UL +#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS \ + 0x2a0074UL +#define PSWHST_REG_INCORRECT_ACCESS_DATA \ + 0x2a0068UL +#define PSWHST_REG_INCORRECT_ACCESS_LENGTH \ + 0x2a006cUL #define PSWRD_REG_DBG_SELECT \ 0x29c040UL #define PSWRD2_REG_CONF11 \ @@ -365,6 +419,10 @@ 0x7 << 0) #define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ 0 +#define MCP_REG_CPU_STATE \ + 0xe05004UL +#define MCP_REG_CPU_EVENT_MASK \ + 0xe05008UL #define PGLUE_B_REG_PF_BAR0_SIZE \ 0x2aae60UL #define PGLUE_B_REG_PF_BAR1_SIZE \ -- cgit v1.2.3 From 2fca6d288d7cac2debf170da021780cd8265c645 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 28 Feb 2016 21:56:34 +0300 Subject: of_mdio: kill useless variable in of_mdiobus_register() of_mdiobus_register() declares the 'paddr' variable to hold the result of the of_get_property() but only uses it once after that while the function can be called directly from the *if* statement. Remove that variable and switch to calling of_find_property() instead since we don't care about the "reg" property's value anyway... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 39c4be41ef83..669739b302b2 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -211,7 +211,6 @@ static bool of_mdiobus_child_is_phy(struct device_node *child) int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) { struct device_node *child; - const __be32 *paddr; bool scanphys = false; int addr, rc; @@ -246,8 +245,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) /* auto scan for PHYs with empty reg property */ for_each_available_child_of_node(np, child) { /* Skip PHYs with reg property set */ - paddr = of_get_property(child, "reg", NULL); - if (paddr) + if (of_find_property(child, "reg", NULL)) continue; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { -- cgit v1.2.3 From fcb6b92a682fe5032fdc31af7f8ed86f1dabb1e2 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Mon, 22 Feb 2016 10:21:41 +0200 Subject: iwlwifi: mvm: update ucode status before stopping device Leaving ucode_loaded to true after stop_device() has been called is a recipe for problems. Flows that are not sync'ed with the driver life cycle (like debugfs hooks and thermal hooks) must check that the firmware is loaded before they interact with it. Therefore we need to keep this variable updated with the real status of the firmware. 
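In outline, the change replaces direct calls to iwl_trans_stop_device() with a small wrapper that clears the firmware-loaded flag first. A simplified sketch of that helper (the full version lives in mvm.h in the diff below):

    static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
    {
    	/* Mark the firmware as not loaded before the transport is
    	 * actually stopped, so flows that are not synchronized with
    	 * the driver life cycle (debugfs, thermal) never observe a
    	 * stale "loaded" state.
    	 */
    	mvm->ucode_loaded = false;
    	iwl_trans_stop_device(mvm->trans);
    }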
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 3 +-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 4 ++-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 ++---- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 6 ++++++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 2 +- 5 files changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index cf5e6349301e..c1a313149eed 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -816,8 +816,7 @@ static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm) { iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - iwl_trans_stop_device(mvm->trans); - + iwl_mvm_stop_device(mvm); /* * Set the HW restart bit -- this is mostly true as we're * going to load new firmware and reprogram that, though diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index f075c36a77c5..efec6ec22f58 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1009,7 +1009,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); return ret; } @@ -1053,7 +1053,7 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm) return 0; error: - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 927b0559d42d..ec6b07282e7d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -972,7 +972,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) */ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN); - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); mvm->scan_status = 0; mvm->ps_disabled = false; @@ -1141,7 +1141,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) */ flush_work(&mvm->roc_done_wk); - iwl_trans_stop_device(mvm->trans); + iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ @@ -1172,8 +1172,6 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) mvm->scan_uid_status[i] = 0; } } - - mvm->ucode_loaded = false; } static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b461b909e7af..6d4e4879357c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1549,6 +1549,12 @@ void iwl_mvm_enable_ac_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, iwl_mvm_enable_txq(mvm, queue, mac80211_queue, ssn, &cfg, wdg_timeout); } +static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) +{ + mvm->ucode_loaded = false; + iwl_trans_stop_device(mvm->trans); +} + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 8bdaa7b27ab1..4716176cbfe0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -643,7 +643,7 @@ 
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE); err = iwl_run_init_mvm_ucode(mvm, true); if (!err || !iwlmvm_mod_params.init_dbg) - iwl_trans_stop_device(trans); + iwl_mvm_stop_device(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE); mutex_unlock(&mvm->mutex); /* returns 0 if successful, 1 if success but in rfkill */ -- cgit v1.2.3 From 5eae443eb5e2b3777582ea37c6a002171ec134d5 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 24 Feb 2016 14:56:21 +0200 Subject: iwlwifi: pcie: detect and workaround invalid write ptr behavior In 9000 series A0 step the closed_rb_num is not wrapping around properly. The queue is wrapping around as it should, so we can W/A it by wrapping the closed_rb_num in the driver. While at it, extend RX logging and add error handling of other cases HW values may cause us to access invalid memory locations. Add also a proper masking of vid value read from HW - this should not have actual affect, but better to be on the safe side. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 398dd9332345..489b07a9e471 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1159,9 +1159,12 @@ restart: r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; i = rxq->read; + /* W/A 9000 device step A0 wrap-around bug */ + r &= (rxq->queue_size - 1); + /* Rx interrupt, but nothing sent from uCode */ if (i == r) - IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); + IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r); while (i != r) { struct iwl_rx_mem_buffer *rxb; @@ -1174,15 +1177,18 @@ restart: * used_bd is a 32 bit but only 12 are used to retrieve * the vid */ - u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]); + u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; + if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table), + "Invalid rxb index from HW %u\n", (u32)vid)) + goto out; rxb = trans_pcie->global_table[vid]; } else { rxb = rxq->queue[i]; rxq->queue[i] = NULL; } - IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i); + IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); i = (i + 1) & (rxq->queue_size - 1); @@ -1245,7 +1251,7 @@ restart: goto restart; } } - +out: /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); @@ -1301,6 +1307,9 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry); struct iwl_trans *trans = trans_pcie->trans; + if (WARN_ON(entry->entry >= trans->num_rx_queues)) + return IRQ_NONE; + lock_map_acquire(&trans->sync_cmd_lockdep_map); local_bh_disable(); -- cgit v1.2.3 From 280452c9056421364111930b6336e27d85240438 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Tue, 23 Feb 2016 16:23:38 +0200 Subject: iwlwifi: mvm: disable DQA support Do not allow entrance into DQA flows until feature is completely ready and merged. 
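The mechanism is a cheap kill switch: the firmware capability check is left in place but short-circuited, so re-enabling DQA later only requires dropping the constant. A simplified sketch of the pattern used in the diff below:

    static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm)
    {
    	/* Keep the capability check compiled and type-checked, but
    	 * force the result to false until the DQA feature is complete.
    	 */
    	return false && fw_has_capa(&mvm->fw->ucode_capa,
    				    IWL_UCODE_TLV_CAPA_DQA_SUPPORT);
    }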
Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6d4e4879357c..7ea2baae0489 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -986,8 +986,9 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) static inline bool iwl_mvm_is_dqa_supported(struct iwl_mvm *mvm) { - return fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DQA_SUPPORT); + /* Make sure DQA isn't allowed in driver until feature is complete */ + return false && fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DQA_SUPPORT); } static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) -- cgit v1.2.3 From 33c85ead7115ad0a1c3c47d5d1d8f8474775d644 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Mon, 22 Feb 2016 15:44:13 +0200 Subject: iwlwifi: mvm: only release the trans ref if d0i3 is supported in fw If d0i3 is not supported by the firmware (or if it's disabled via module parameters) we shouldn't release the initial transport reference, so that we won't enter runtime suspend. Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4716176cbfe0..f3283f4ea3df 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -672,8 +672,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx)); - /* rpm starts with a taken reference, we can release it now */ - iwl_trans_unref(mvm->trans); + /* The transport always starts with a taken reference, we can + * release it now if d0i3 is supported */ + if (iwl_mvm_is_d0i3_supported(mvm)) + iwl_trans_unref(mvm->trans); iwl_mvm_tof_init(mvm); -- cgit v1.2.3 From 0d0985adf6519952e0d51b5272abc3ef28d833bc Mon Sep 17 00:00:00 2001 From: Andrei Otcheretianski Date: Mon, 29 Feb 2016 13:25:48 +0200 Subject: iwlwifi: add disable_11ac module param Add module parameter that disables VHT capabilities. 
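The new parameter follows the usual iwlwifi module-parameter pattern: a bool field in iwl_mod_params, registered with module_param_named(), and consulted where VHT capabilities are built. A condensed sketch of the three touch points, taken from the diff that follows:

    /* iwl-drv.c: expose the knob */
    module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac,
    		   bool, S_IRUGO);
    MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");

    /* iwl-nvm-parse.c: only advertise VHT if the SKU allows it and
     * the user did not disable it via the module parameter.
     */
    if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
    	iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
    			      tx_chains, rx_chains);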
Signed-off-by: Andrei Otcheretianski Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 4 ++++ drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 2 ++ drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index ee4ffa599fd8..184c0fef37c0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1719,3 +1719,7 @@ MODULE_PARM_DESC(fw_monitor, module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_entry_delay, uint, S_IRUGO); MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)"); + +module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, + S_IRUGO); +MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index b88ecc7892a9..d1a5dd1602f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -115,6 +115,7 @@ enum iwl_amsdu_size { * entering D0i3 (in msecs) * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor + * @disable_11ac: disable VHT capabilities, default = false. */ struct iwl_mod_params { int sw_crypto; @@ -135,6 +136,7 @@ struct iwl_mod_params { unsigned int d0i3_entry_delay; bool lar_disable; bool fw_monitor; + bool disable_11ac; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index e84cb8d638a1..348135792f3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -452,7 +452,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, IEEE80211_BAND_5GHZ); iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ, tx_chains, rx_chains); - if (data->sku_cap_11ac_enable) + if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac) iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, tx_chains, rx_chains); -- cgit v1.2.3 From e27deb4583642d6c5b3cf18060e8239db1be9e59 Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Tue, 1 Mar 2016 10:30:48 +0200 Subject: iwlwifi: mvm: take the transport ref back when leaving If d0i3 is supported, we have released the initial transport reference in iwl_op_mode_mvm_start(), so we should take it back in iwl_op_mode_mvm_stop() to keep it balanced. Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index f3283f4ea3df..3760a094b932 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -703,6 +703,13 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); int i; + /* If d0i3 is supported, we have released the reference that + * the transport started with, so we should take it back now + * that we are leaving. 
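Together with the previous patch, the runtime-PM reference now stays balanced across the op-mode lifetime: the initial reference is dropped at start only when d0i3 is supported, and re-acquired at stop under the same condition. A simplified sketch of the paired logic:

    /* iwl_op_mode_mvm_start(): the transport starts with a reference */
    if (iwl_mvm_is_d0i3_supported(mvm))
    	iwl_trans_unref(mvm->trans);	/* allow runtime suspend */

    /* iwl_op_mode_mvm_stop(): take the reference back before tearing down */
    if (iwl_mvm_is_d0i3_supported(mvm))
    	iwl_trans_ref(mvm->trans);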
+ */ + if (iwl_mvm_is_d0i3_supported(mvm)) + iwl_trans_ref(mvm->trans); + iwl_mvm_leds_exit(mvm); iwl_mvm_thermal_exit(mvm); -- cgit v1.2.3 From b4f7a9d168d9f8204abf06d244a7938dd62b87b0 Mon Sep 17 00:00:00 2001 From: Liad Kaufman Date: Wed, 3 Feb 2016 11:05:41 +0200 Subject: iwlwifi: mvm: support sw queue start/stop from mvm Add a wrapper function to allow stopping SW queues from MVM as well. Signed-off-by: Liad Kaufman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 4 +++ drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 38 ++++++++++++++++++---------- 2 files changed, 28 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 7ea2baae0489..f9019c4a5c0e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1556,6 +1556,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) iwl_trans_stop_device(mvm->trans); } +/* Stop/start all mac queues in a given bitmap */ +void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq); +void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq); + /* Thermal management and CT-kill */ void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff); void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 3760a094b932..e6d0adc07029 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -897,24 +897,17 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_common(mvm, rxb, pkt); } -static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int queue) +void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq) { - struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq; int q; - spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[queue].hw_queue_to_mac80211; - spin_unlock_bh(&mvm->queue_info_lock); - if (WARN_ON_ONCE(!mq)) return; for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) { IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) already stopped\n", - queue, q); + "mac80211 %d already stopped\n", q); continue; } @@ -934,24 +927,29 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, iwl_trans_block_txq_ptrs(mvm->trans, false); } -static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) +static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); unsigned long mq; - int q; spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->queue_info[queue].hw_queue_to_mac80211; + mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; spin_unlock_bh(&mvm->queue_info_lock); + iwl_mvm_stop_mac_queues(mvm, mq); +} + +void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq) +{ + int q; + if (WARN_ON_ONCE(!mq)) return; for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) { if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) { IWL_DEBUG_TX_QUEUES(mvm, - "queue %d (mac80211 %d) still stopped\n", - queue, q); + "mac80211 %d still stopped\n", q); continue; } @@ -959,6 +957,18 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int queue) } } +static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) +{ + struct iwl_mvm *mvm = 
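The refactor splits the per-hw-queue op_mode callbacks into a lookup step plus a bitmap helper that MVM code can call directly. A simplified sketch of the resulting call shape for the stop path (the wake path is symmetric):

    static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
    {
    	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
    	unsigned long mq;

    	/* Translate the HW queue into its mac80211 queue bitmap... */
    	spin_lock_bh(&mvm->queue_info_lock);
    	mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211;
    	spin_unlock_bh(&mvm->queue_info_lock);

    	/* ...and let the shared helper stop every mapped queue */
    	iwl_mvm_stop_mac_queues(mvm, mq);
    }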
IWL_OP_MODE_GET_MVM(op_mode); + unsigned long mq; + + spin_lock_bh(&mvm->queue_info_lock); + mq = mvm->queue_info[hw_queue].hw_queue_to_mac80211; + spin_unlock_bh(&mvm->queue_info_lock); + + iwl_mvm_start_mac_queues(mvm, mq); +} + void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) { if (state) -- cgit v1.2.3 From 7cb43bebab52d8a9aa5943fbd1babe633a255e2c Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Sat, 27 Feb 2016 22:34:16 +0530 Subject: wan: lmc: Switch to using managed resources Use managed resource functions devm_kzalloc and pcim_enable_device to simplify error handling. Subsequently, remove unnecessary kfree, pci_disable_device and pci_release_regions. To be compatible with the change, various gotos are replaced with direct returns and unneeded labels are dropped. Also, `sc` was only being freed in the probe function and not the remove function before the change. By using devm_kzalloc this patch also fixes this memory leak. Signed-off-by: Amitoj Kaur Chawla Signed-off-by: David S. Miller --- drivers/net/wan/lmc/lmc_main.c | 27 +++++++-------------------- 1 file changed, 7 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c index 317bc79cc8b9..bb33b242ab48 100644 --- a/drivers/net/wan/lmc/lmc_main.c +++ b/drivers/net/wan/lmc/lmc_main.c @@ -826,7 +826,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* lmc_trace(dev, "lmc_init_one in"); */ - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) { printk(KERN_ERR "lmc: pci enable failed: %d\n", err); return err; @@ -835,23 +835,20 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_request_regions(pdev, "lmc"); if (err) { printk(KERN_ERR "lmc: pci_request_region failed\n"); - goto err_req_io; + return err; } /* * Allocate our own device structure */ - sc = kzalloc(sizeof(lmc_softc_t), GFP_KERNEL); - if (!sc) { - err = -ENOMEM; - goto err_kzalloc; - } + sc = devm_kzalloc(&pdev->dev, sizeof(lmc_softc_t), GFP_KERNEL); + if (!sc) + return -ENOMEM; dev = alloc_hdlcdev(sc); if (!dev) { printk(KERN_ERR "lmc:alloc_netdev for device failed\n"); - err = -ENOMEM; - goto err_hdlcdev; + return -ENOMEM; } @@ -888,7 +885,7 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { printk(KERN_ERR "%s: register_netdev failed.\n", dev->name); free_netdev(dev); - goto err_hdlcdev; + return err; } sc->lmc_cardtype = LMC_CARDTYPE_UNKNOWN; @@ -971,14 +968,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) lmc_trace(dev, "lmc_init_one out"); return 0; - -err_hdlcdev: - kfree(sc); -err_kzalloc: - pci_release_regions(pdev); -err_req_io: - pci_disable_device(pdev); - return err; } /* @@ -992,8 +981,6 @@ static void lmc_remove_one(struct pci_dev *pdev) printk(KERN_DEBUG "%s: removing...\n", dev->name); unregister_hdlc_device(dev); free_netdev(dev); - pci_release_regions(pdev); - pci_disable_device(pdev); } } -- cgit v1.2.3 From c2035eea3e9e1df768285fc8d72808469687aec0 Mon Sep 17 00:00:00 2001 From: Ram Amrani Date: Wed, 2 Mar 2016 20:26:00 +0200 Subject: qed: Correct BAR sizes for older MFW Driver learns the inner bar sized from a register configured by management firmware, but older versions are not setting this register. But since we know which values were configured back then, use them instead. Signed-off-by: Ram Amrani Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 42 +++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d9a5175ebd04..706c61431e51 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -32,6 +32,33 @@ #include "qed_sp.h" /* API common to all protocols */ +enum BAR_ID { + BAR_ID_0, /* used for GRC */ + BAR_ID_1 /* Used for doorbells */ +}; + +static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, + enum BAR_ID bar_id) +{ + u32 bar_reg = (bar_id == BAR_ID_0 ? + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + + if (val) + return 1 << (val + 15); + + /* Old MFW initialized above registered only conditionally */ + if (p_hwfn->cdev->num_hwfns > 1) { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 256kB for GRC and 512kB for DB\n"); + return BAR_ID_0 ? 256 * 1024 : 512 * 1024; + } else { + DP_INFO(p_hwfn, + "BAR size not configured. Assuming BAR size of 512kB for GRC and 512kB for DB\n"); + return 512 * 1024; + } +} + void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level) { @@ -1385,17 +1412,6 @@ err0: return rc; } -static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn, - u8 bar_id) -{ - u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE - : PGLUE_B_REG_PF_BAR1_SIZE); - u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); - - /* Get the BAR size(in KB) from hardware given val */ - return 1 << (val + 15); -} - int qed_hw_prepare(struct qed_dev *cdev, int personality) { @@ -1420,11 +1436,11 @@ int qed_hw_prepare(struct qed_dev *cdev, u8 __iomem *addr; /* adjust bar offset for second engine */ - addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2; + addr = cdev->regview + qed_hw_bar_size(p_hwfn, BAR_ID_0) / 2; p_regview = addr; /* adjust doorbell bar offset for second engine */ - addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2; + addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, BAR_ID_1) / 2; p_doorbell = addr; /* prepare second hw function */ -- cgit v1.2.3 From 12e09c694a68a3d71c7c655eeb689b7fe43b09f5 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 2 Mar 2016 20:26:01 +0200 Subject: qed: Support B0 instead of A0 BB_A0 is a development model that is will not reach actual clients. In fact, future firmware would simply fail to initialize such chip. This changes the configuration into B0 instead of A0, and adds a safeguard against the slim chance someone would actually try this with an A0 adapter in which case probe would gracefully fail. Signed-off-by: Yuval Mintz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 41 +++++++++++++++++-------------- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 2 +- 2 files changed, 24 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 706c61431e51..2e02e052cb87 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -420,7 +420,7 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn) { int hw_mode = 0; - hw_mode = (1 << MODE_BB_A0); + hw_mode = (1 << MODE_BB_B0); switch (p_hwfn->cdev->num_ports_in_engines) { case 1: @@ -976,18 +976,8 @@ static void qed_hw_hwfn_free(struct qed_hwfn *p_hwfn) } /* Setup bar access */ -static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) +static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) { - int rc; - - /* Allocate PTT pool */ - rc = qed_ptt_pool_alloc(p_hwfn); - if (rc) - return rc; - - /* Allocate the main PTT */ - p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); - /* clear indirect access */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); @@ -1002,8 +992,6 @@ static int qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn) /* enable internal target-read */ qed_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - - return 0; } static void get_function_id(struct qed_hwfn *p_hwfn) @@ -1311,7 +1299,7 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn, return rc; } -static void qed_get_dev_info(struct qed_dev *cdev) +static int qed_get_dev_info(struct qed_dev *cdev) { struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); u32 tmp; @@ -1350,6 +1338,14 @@ static void qed_get_dev_info(struct qed_dev *cdev) "Chip details - Num: %04x Rev: %04x Bond id: %04x Metal: %04x\n", cdev->chip_num, cdev->chip_rev, cdev->chip_bond_id, cdev->chip_metal); + + if (QED_IS_BB(cdev) && CHIP_REV_IS_A0(cdev)) { + DP_NOTICE(cdev->hwfns, + "The chip type/rev (BB A0) is not supported!\n"); + return -EINVAL; + } + + return 0; } static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, @@ -1372,15 +1368,24 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn, get_function_id(p_hwfn); - rc = qed_hw_hwfn_prepare(p_hwfn); + /* Allocate PTT pool */ + rc = qed_ptt_pool_alloc(p_hwfn); if (rc) { DP_NOTICE(p_hwfn, "Failed to prepare hwfn's hw\n"); goto err0; } + /* Allocate the main PTT */ + p_hwfn->p_main_ptt = qed_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); + /* First hwfn learns basic information, e.g., number of hwfns */ - if (!p_hwfn->my_id) - qed_get_dev_info(p_hwfn->cdev); + if (!p_hwfn->my_id) { + rc = qed_get_dev_info(p_hwfn->cdev); + if (rc != 0) + goto err1; + } + + qed_hw_hwfn_prepare(p_hwfn); /* Initialize MCP structure */ rc = qed_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 49bbf696a16d..592e0e6d9b42 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -968,7 +968,7 @@ struct igu_msix_vector { enum init_modes { MODE_BB_A0, - MODE_RESERVED, + MODE_BB_B0, MODE_RESERVED2, MODE_ASIC, MODE_RESERVED3, -- cgit v1.2.3 From 86622ee75312568e9a862b6f023e54b149380b6d Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 2 Mar 2016 20:26:02 +0200 Subject: qed: Move statistics to L2 code Current statistics logic is meant for L2, not for all future protocols. 
Move this content to the proper designated file. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 244 +------------------ drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 2 - drivers/net/ethernet/qlogic/qed/qed_l2.c | 323 ++++++++++++++++++++++++++ 3 files changed, 324 insertions(+), 245 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 2e02e052cb87..d08078fd7f82 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -677,10 +677,8 @@ int qed_hw_init(struct qed_dev *cdev, bool allow_npar_tx_switch, const u8 *bin_fw_data) { - struct qed_storm_stats *p_stat; - u32 load_code, param, *p_address; + u32 load_code, param; int rc, mfw_rc, i; - u8 fw_vport = 0; rc = qed_init_fw_data(cdev, bin_fw_data); if (rc != 0) @@ -689,10 +687,6 @@ int qed_hw_init(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - rc = qed_fw_vport(p_hwfn, 0, &fw_vport); - if (rc != 0) - return rc; - /* Enable DMAE in PXP */ rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); @@ -756,25 +750,6 @@ int qed_hw_init(struct qed_dev *cdev, } p_hwfn->hw_init_done = true; - - /* init PF stats */ - p_stat = &p_hwfn->storm_stats; - p_stat->mstats.address = BAR0_MAP_REG_MSDM_RAM + - MSTORM_QUEUE_STAT_OFFSET(fw_vport); - p_stat->mstats.len = sizeof(struct eth_mstorm_per_queue_stat); - - p_stat->ustats.address = BAR0_MAP_REG_USDM_RAM + - USTORM_QUEUE_STAT_OFFSET(fw_vport); - p_stat->ustats.len = sizeof(struct eth_ustorm_per_queue_stat); - - p_stat->pstats.address = BAR0_MAP_REG_PSDM_RAM + - PSTORM_QUEUE_STAT_OFFSET(fw_vport); - p_stat->pstats.len = sizeof(struct eth_pstorm_per_queue_stat); - - p_address = &p_stat->tstats.address; - *p_address = BAR0_MAP_REG_TSDM_RAM + - TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); - p_stat->tstats.len = sizeof(struct tstorm_per_port_stat); } return 0; @@ -1557,223 +1532,6 @@ void qed_chain_free(struct qed_dev *cdev, p_chain->p_phys_addr); } -static void __qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) -{ - int i, j; - - memset(stats, 0, sizeof(*stats)); - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct eth_mstorm_per_queue_stat mstats; - struct eth_ustorm_per_queue_stat ustats; - struct eth_pstorm_per_queue_stat pstats; - struct tstorm_per_port_stat tstats; - struct port_stats port_stats; - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); - - if (!p_ptt) { - DP_ERR(p_hwfn, "Failed to acquire ptt\n"); - continue; - } - - memset(&mstats, 0, sizeof(mstats)); - qed_memcpy_from(p_hwfn, p_ptt, &mstats, - p_hwfn->storm_stats.mstats.address, - p_hwfn->storm_stats.mstats.len); - - memset(&ustats, 0, sizeof(ustats)); - qed_memcpy_from(p_hwfn, p_ptt, &ustats, - p_hwfn->storm_stats.ustats.address, - p_hwfn->storm_stats.ustats.len); - - memset(&pstats, 0, sizeof(pstats)); - qed_memcpy_from(p_hwfn, p_ptt, &pstats, - p_hwfn->storm_stats.pstats.address, - p_hwfn->storm_stats.pstats.len); - - memset(&tstats, 0, sizeof(tstats)); - qed_memcpy_from(p_hwfn, p_ptt, &tstats, - p_hwfn->storm_stats.tstats.address, - p_hwfn->storm_stats.tstats.len); - - memset(&port_stats, 0, sizeof(port_stats)); - - if (p_hwfn->mcp_info) - qed_memcpy_from(p_hwfn, p_ptt, &port_stats, - p_hwfn->mcp_info->port_addr + - offsetof(struct public_port, stats), - sizeof(port_stats)); - qed_ptt_release(p_hwfn, p_ptt); - - stats->no_buff_discards += - 
HILO_64_REGPAIR(mstats.no_buff_discard); - stats->packet_too_big_discard += - HILO_64_REGPAIR(mstats.packet_too_big_discard); - stats->ttl0_discard += - HILO_64_REGPAIR(mstats.ttl0_discard); - stats->tpa_coalesced_pkts += - HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); - stats->tpa_coalesced_events += - HILO_64_REGPAIR(mstats.tpa_coalesced_events); - stats->tpa_aborts_num += - HILO_64_REGPAIR(mstats.tpa_aborts_num); - stats->tpa_coalesced_bytes += - HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); - - stats->rx_ucast_bytes += - HILO_64_REGPAIR(ustats.rcv_ucast_bytes); - stats->rx_mcast_bytes += - HILO_64_REGPAIR(ustats.rcv_mcast_bytes); - stats->rx_bcast_bytes += - HILO_64_REGPAIR(ustats.rcv_bcast_bytes); - stats->rx_ucast_pkts += - HILO_64_REGPAIR(ustats.rcv_ucast_pkts); - stats->rx_mcast_pkts += - HILO_64_REGPAIR(ustats.rcv_mcast_pkts); - stats->rx_bcast_pkts += - HILO_64_REGPAIR(ustats.rcv_bcast_pkts); - - stats->mftag_filter_discards += - HILO_64_REGPAIR(tstats.mftag_filter_discard); - stats->mac_filter_discards += - HILO_64_REGPAIR(tstats.eth_mac_filter_discard); - - stats->tx_ucast_bytes += - HILO_64_REGPAIR(pstats.sent_ucast_bytes); - stats->tx_mcast_bytes += - HILO_64_REGPAIR(pstats.sent_mcast_bytes); - stats->tx_bcast_bytes += - HILO_64_REGPAIR(pstats.sent_bcast_bytes); - stats->tx_ucast_pkts += - HILO_64_REGPAIR(pstats.sent_ucast_pkts); - stats->tx_mcast_pkts += - HILO_64_REGPAIR(pstats.sent_mcast_pkts); - stats->tx_bcast_pkts += - HILO_64_REGPAIR(pstats.sent_bcast_pkts); - stats->tx_err_drop_pkts += - HILO_64_REGPAIR(pstats.error_drop_pkts); - stats->rx_64_byte_packets += port_stats.pmm.r64; - stats->rx_127_byte_packets += port_stats.pmm.r127; - stats->rx_255_byte_packets += port_stats.pmm.r255; - stats->rx_511_byte_packets += port_stats.pmm.r511; - stats->rx_1023_byte_packets += port_stats.pmm.r1023; - stats->rx_1518_byte_packets += port_stats.pmm.r1518; - stats->rx_1522_byte_packets += port_stats.pmm.r1522; - stats->rx_2047_byte_packets += port_stats.pmm.r2047; - stats->rx_4095_byte_packets += port_stats.pmm.r4095; - stats->rx_9216_byte_packets += port_stats.pmm.r9216; - stats->rx_16383_byte_packets += port_stats.pmm.r16383; - stats->rx_crc_errors += port_stats.pmm.rfcs; - stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; - stats->rx_pause_frames += port_stats.pmm.rxpf; - stats->rx_pfc_frames += port_stats.pmm.rxpp; - stats->rx_align_errors += port_stats.pmm.raln; - stats->rx_carrier_errors += port_stats.pmm.rfcr; - stats->rx_oversize_packets += port_stats.pmm.rovr; - stats->rx_jabbers += port_stats.pmm.rjbr; - stats->rx_undersize_packets += port_stats.pmm.rund; - stats->rx_fragments += port_stats.pmm.rfrg; - stats->tx_64_byte_packets += port_stats.pmm.t64; - stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; - stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; - stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; - stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; - stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; - stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; - stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; - stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; - stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; - stats->tx_pause_frames += port_stats.pmm.txpf; - stats->tx_pfc_frames += port_stats.pmm.txpp; - stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; - stats->tx_total_collisions += port_stats.pmm.tncl; - stats->rx_mac_bytes += port_stats.pmm.rbyte; - stats->rx_mac_uc_packets += 
port_stats.pmm.rxuca; - stats->rx_mac_mc_packets += port_stats.pmm.rxmca; - stats->rx_mac_bc_packets += port_stats.pmm.rxbca; - stats->rx_mac_frames_ok += port_stats.pmm.rxpok; - stats->tx_mac_bytes += port_stats.pmm.tbyte; - stats->tx_mac_uc_packets += port_stats.pmm.txuca; - stats->tx_mac_mc_packets += port_stats.pmm.txmca; - stats->tx_mac_bc_packets += port_stats.pmm.txbca; - stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; - - for (j = 0; j < 8; j++) { - stats->brb_truncates += port_stats.brb.brb_truncate[j]; - stats->brb_discards += port_stats.brb.brb_discard[j]; - } - } -} - -void qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) -{ - u32 i; - - if (!cdev) { - memset(stats, 0, sizeof(*stats)); - return; - } - - __qed_get_vport_stats(cdev, stats); - - if (!cdev->reset_stats) - return; - - /* Reduce the statistics baseline */ - for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) - ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; -} - -/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ -void qed_reset_vport_stats(struct qed_dev *cdev) -{ - int i; - - for_each_hwfn(cdev, i) { - struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct eth_mstorm_per_queue_stat mstats; - struct eth_ustorm_per_queue_stat ustats; - struct eth_pstorm_per_queue_stat pstats; - struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); - - if (!p_ptt) { - DP_ERR(p_hwfn, "Failed to acquire ptt\n"); - continue; - } - - memset(&mstats, 0, sizeof(mstats)); - qed_memcpy_to(p_hwfn, p_ptt, - p_hwfn->storm_stats.mstats.address, - &mstats, - p_hwfn->storm_stats.mstats.len); - - memset(&ustats, 0, sizeof(ustats)); - qed_memcpy_to(p_hwfn, p_ptt, - p_hwfn->storm_stats.ustats.address, - &ustats, - p_hwfn->storm_stats.ustats.len); - - memset(&pstats, 0, sizeof(pstats)); - qed_memcpy_to(p_hwfn, p_ptt, - p_hwfn->storm_stats.pstats.address, - &pstats, - p_hwfn->storm_stats.pstats.len); - - qed_ptt_release(p_hwfn, p_ptt); - } - - /* PORT statistics are not necessarily reset, so we need to - * read and create a baseline for future statistics. 
- */ - if (!cdev->reset_stats) - DP_INFO(cdev, "Reset stats not allocated\n"); - else - __qed_get_vport_stats(cdev, cdev->reset_stats); -} - int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index e29a3ba6c8b0..155f26b938fc 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -156,8 +156,6 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); -void qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats); void qed_reset_vport_stats(struct qed_dev *cdev); enum qed_dmae_address_type_t { diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index bba59c51f72c..102ddc73b841 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -31,6 +31,7 @@ #include "qed_hsi.h" #include "qed_hw.h" #include "qed_int.h" +#include "qed_mcp.h" #include "qed_reg_addr.h" #include "qed_sp.h" @@ -1231,6 +1232,328 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev, return rc; } +/* Statistics related code */ +static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, + u16 statistics_bin) +{ + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); +} + +static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_pstorm_per_queue_stat pstats; + u32 pstats_addr = 0, pstats_len = 0; + + __qed_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, + statistics_bin); + + memset(&pstats, 0, sizeof(pstats)); + qed_memcpy_from(p_hwfn, p_ptt, &pstats, + pstats_addr, pstats_len); + + p_stats->tx_ucast_bytes += + HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->tx_mcast_bytes += + HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->tx_bcast_bytes += + HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->tx_ucast_pkts += + HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->tx_mcast_pkts += + HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->tx_bcast_pkts += + HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->tx_err_drop_pkts += + HILO_64_REGPAIR(pstats.error_drop_pkts); +} + +static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len) +{ + *p_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + *p_len = sizeof(struct tstorm_per_port_stat); +} + +static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + u32 tstats_addr = 0, tstats_len = 0; + struct tstorm_per_port_stat tstats; + + __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len); + + memset(&tstats, 0, sizeof(tstats)); + qed_memcpy_from(p_hwfn, p_ptt, &tstats, + tstats_addr, tstats_len); + + p_stats->mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + p_stats->mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); +} + +static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, + u16 statistics_bin) +{ + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); +} + 
+static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_ustorm_per_queue_stat ustats; + u32 ustats_addr = 0, ustats_len = 0; + + __qed_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, + statistics_bin); + + memset(&ustats, 0, sizeof(ustats)); + qed_memcpy_from(p_hwfn, p_ptt, &ustats, + ustats_addr, ustats_len); + + p_stats->rx_ucast_bytes += + HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->rx_mcast_bytes += + HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->rx_bcast_bytes += + HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->rx_ucast_pkts += + HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->rx_mcast_pkts += + HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->rx_bcast_pkts += + HILO_64_REGPAIR(ustats.rcv_bcast_pkts); +} + +static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn, + u32 *p_addr, + u32 *p_len, + u16 statistics_bin) +{ + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_mstorm_per_queue_stat); +} + +static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_mstorm_per_queue_stat mstats; + u32 mstats_addr = 0, mstats_len = 0; + + __qed_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, + statistics_bin); + + memset(&mstats, 0, sizeof(mstats)); + qed_memcpy_from(p_hwfn, p_ptt, &mstats, + mstats_addr, mstats_len); + + p_stats->no_buff_discards += + HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + p_stats->ttl0_discard += + HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + p_stats->tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + p_stats->tpa_aborts_num += + HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); +} + +static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *p_stats) +{ + struct port_stats port_stats; + int j; + + memset(&port_stats, 0, sizeof(port_stats)); + + qed_memcpy_from(p_hwfn, p_ptt, &port_stats, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, stats), + sizeof(port_stats)); + + p_stats->rx_64_byte_packets += port_stats.pmm.r64; + p_stats->rx_127_byte_packets += port_stats.pmm.r127; + p_stats->rx_255_byte_packets += port_stats.pmm.r255; + p_stats->rx_511_byte_packets += port_stats.pmm.r511; + p_stats->rx_1023_byte_packets += port_stats.pmm.r1023; + p_stats->rx_1518_byte_packets += port_stats.pmm.r1518; + p_stats->rx_1522_byte_packets += port_stats.pmm.r1522; + p_stats->rx_2047_byte_packets += port_stats.pmm.r2047; + p_stats->rx_4095_byte_packets += port_stats.pmm.r4095; + p_stats->rx_9216_byte_packets += port_stats.pmm.r9216; + p_stats->rx_16383_byte_packets += port_stats.pmm.r16383; + p_stats->rx_crc_errors += port_stats.pmm.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; + p_stats->rx_pause_frames += port_stats.pmm.rxpf; + p_stats->rx_pfc_frames += port_stats.pmm.rxpp; + p_stats->rx_align_errors += port_stats.pmm.raln; + p_stats->rx_carrier_errors += port_stats.pmm.rfcr; + p_stats->rx_oversize_packets += port_stats.pmm.rovr; + p_stats->rx_jabbers += port_stats.pmm.rjbr; + p_stats->rx_undersize_packets += port_stats.pmm.rund; + 
p_stats->rx_fragments += port_stats.pmm.rfrg; + p_stats->tx_64_byte_packets += port_stats.pmm.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; + p_stats->tx_pause_frames += port_stats.pmm.txpf; + p_stats->tx_pfc_frames += port_stats.pmm.txpp; + p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; + p_stats->tx_total_collisions += port_stats.pmm.tncl; + p_stats->rx_mac_bytes += port_stats.pmm.rbyte; + p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; + p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; + p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; + p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; + p_stats->tx_mac_bytes += port_stats.pmm.tbyte; + p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; + p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; + p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + for (j = 0; j < 8; j++) { + p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; + p_stats->brb_discards += port_stats.brb.brb_discard[j]; + } +} + +static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_eth_stats *stats, + u16 statistics_bin) +{ + __qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); + __qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); + + if (p_hwfn->mcp_info) + __qed_get_vport_port_stats(p_hwfn, p_ptt, stats); +} + +static void _qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + u8 fw_vport = 0; + int i; + + memset(stats, 0, sizeof(*stats)); + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct qed_ptt *p_ptt; + + /* The main vport index is relative first */ + if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + continue; + } + + p_ptt = qed_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport); + + qed_ptt_release(p_hwfn, p_ptt); + } +} + +void qed_get_vport_stats(struct qed_dev *cdev, + struct qed_eth_stats *stats) +{ + u32 i; + + if (!cdev) { + memset(stats, 0, sizeof(*stats)); + return; + } + + _qed_get_vport_stats(cdev, stats); + + if (!cdev->reset_stats) + return; + + /* Reduce the statistics baseline */ + for (i = 0; i < sizeof(struct qed_eth_stats) / sizeof(u64); i++) + ((u64 *)stats)[i] -= ((u64 *)cdev->reset_stats)[i]; +} + +/* zeroes V-PORT specific portion of stats (Port stats remains untouched) */ +void qed_reset_vport_stats(struct qed_dev *cdev) +{ + int i; + + for_each_hwfn(cdev, i) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn); + u32 addr = 0, len = 0; + + if (!p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire 
ptt\n"); + continue; + } + + memset(&mstats, 0, sizeof(mstats)); + __qed_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); + + memset(&ustats, 0, sizeof(ustats)); + __qed_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); + + memset(&pstats, 0, sizeof(pstats)); + __qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); + qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); + + qed_ptt_release(p_hwfn, p_ptt); + } + + /* PORT statistics are not necessarily reset, so we need to + * read and create a baseline for future statistics. + */ + if (!cdev->reset_stats) + DP_INFO(cdev, "Reset stats not allocated\n"); + else + _qed_get_vport_stats(cdev, cdev->reset_stats); +} + static int qed_fill_eth_dev_info(struct qed_dev *cdev, struct qed_dev_eth_info *info) { -- cgit v1.2.3 From 8c925c443801721d2731e540055782bcdbbde03b Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 2 Mar 2016 20:26:03 +0200 Subject: qed: Fix error flow on slowpath start In case of problems when initializing the chip, the error flows aren't being properly done. Specifically, it's possible that the chip would be left in a configuration allowing it [internally] to access the host memory, causing fatal problems in the device that would require power cycle to overcome. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 89 +++++++++++++++------------ drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 9 +++ drivers/net/ethernet/qlogic/qed/qed_main.c | 10 +-- 3 files changed, 64 insertions(+), 44 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d08078fd7f82..c3f293d17991 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -756,10 +756,54 @@ int qed_hw_init(struct qed_dev *cdev, } #define QED_HW_STOP_RETRY_LIMIT (10) +static inline void qed_hw_timers_stop(struct qed_dev *cdev, + struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + int i; + + /* close timers */ + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + + for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { + if ((!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!qed_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + /* Dependent on number of connection/tasks, possibly + * 1ms sleep is required between polls + */ + usleep_range(1000, 2000); + } + + if (i < QED_HW_STOP_RETRY_LIMIT) + return; + + DP_NOTICE(p_hwfn, + "Timers linear scans are not over [Connection %02x Tasks %02x]\n", + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)qed_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)); +} + +void qed_hw_timers_stop_all(struct qed_dev *cdev) +{ + int j; + + for_each_hwfn(cdev, j) { + struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; + struct qed_ptt *p_ptt = p_hwfn->p_main_ptt; + + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); + } +} + int qed_hw_stop(struct qed_dev *cdev) { int rc = 0, t_rc; - int i, j; + int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; @@ -772,7 +816,8 @@ int qed_hw_stop(struct qed_dev *cdev) rc = qed_sp_pf_stop(p_hwfn); if (rc) - return rc; + DP_NOTICE(p_hwfn, + "Failed to close PF against FW. 
Continue to stop HW to prevent illegal host access by the device\n"); qed_wr(p_hwfn, p_ptt, NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); @@ -783,24 +828,7 @@ int qed_hw_stop(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); - for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { - if ((!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN)) && - (!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK))) - break; - - usleep_range(1000, 2000); - } - if (i == QED_HW_STOP_RETRY_LIMIT) - DP_NOTICE(p_hwfn, - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN), - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK)); + qed_hw_timers_stop(cdev, p_hwfn, p_ptt); /* Disable Attention Generation */ qed_int_igu_disable_int(p_hwfn, p_ptt); @@ -829,7 +857,7 @@ int qed_hw_stop(struct qed_dev *cdev) void qed_hw_stop_fastpath(struct qed_dev *cdev) { - int i, j; + int j; for_each_hwfn(cdev, j) { struct qed_hwfn *p_hwfn = &cdev->hwfns[j]; @@ -848,25 +876,6 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev) qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0); qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); - qed_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); - for (i = 0; i < QED_HW_STOP_RETRY_LIMIT; i++) { - if ((!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN)) && - (!qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK))) - break; - - usleep_range(1000, 2000); - } - if (i == QED_HW_STOP_RETRY_LIMIT) - DP_NOTICE(p_hwfn, - "Timers linear scans are not over [Connection %02x Tasks %02x]\n", - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_CONN), - (u8)qed_rd(p_hwfn, p_ptt, - TM_REG_PF_SCAN_ACTIVE_TASK)); - qed_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); /* Need to wait 1ms to guarantee SBs are cleared */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index 155f26b938fc..d6c7ddf4f4d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -77,6 +77,15 @@ int qed_hw_init(struct qed_dev *cdev, bool allow_npar_tx_switch, const u8 *bin_fw_data); +/** + * @brief qed_hw_timers_stop_all - stop the timers HW block + * + * @param cdev + * + * @return void + */ +void qed_hw_timers_stop_all(struct qed_dev *cdev); + /** * @brief qed_hw_stop - * diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index caa689e6575c..26d40db07ddd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -779,7 +779,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode, true, data); if (rc) - goto err3; + goto err2; DP_INFO(cdev, "HW initialization and function start completed successfully\n"); @@ -798,12 +798,14 @@ static int qed_slowpath_start(struct qed_dev *cdev, return rc; } + qed_reset_vport_stats(cdev); + return 0; -err3: - qed_free_stream_mem(cdev); - qed_slowpath_irq_free(cdev); err2: + qed_hw_timers_stop_all(cdev); + qed_slowpath_irq_free(cdev); + qed_free_stream_mem(cdev); qed_disable_msix(cdev); err1: qed_resc_free(cdev); -- cgit v1.2.3 From 619db46b259749b2ffe79aeea2247d7573861586 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 2 Mar 2016 20:26:04 
+0200 Subject: qed: Remove unused NVM vendor ID Remove 2 unused fields from driver code. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 3 --- drivers/net/ethernet/qlogic/qed/qed_dev.c | 7 ------- 2 files changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 66b021e3c1be..e5604eec81bf 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -146,9 +146,6 @@ struct qed_hw_info { u16 ovlan; u32 part_num[4]; - u32 vendor_id; - u32 device_id; - unsigned char hw_mac_addr[ETH_ALEN]; struct qed_igu_info *p_igu_info; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index c3f293d17991..b7d100f6bd6f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -1083,13 +1083,6 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); - /* Read Vendor Id / Device Id */ - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + - offsetof(struct nvm_cfg1, glob) + - offsetof(struct nvm_cfg1_glob, pci_id); - p_hwfn->hw_info.vendor_id = qed_rd(p_hwfn, p_ptt, addr) & - NVM_CFG1_GLOB_VENDOR_ID_MASK; - addr = MCP_REG_SCRATCH + nvm_cfg1_offset + offsetof(struct nvm_cfg1, glob) + offsetof(struct nvm_cfg1_glob, core_cfg); -- cgit v1.2.3 From 495db27302a88fcabb105c3d30a617beda913945 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:27 +0100 Subject: stmmac: share reset function between dwmac100 and dwmac1000 This patch shares the same reset procedure between the dwmac100 and dwmac1000 chips. This will also make it easier to enhance the driver and to support new chips. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 3 ++- drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 1 - drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 1 - .../net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 22 ++++------------------ drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 22 +++------------------- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 5 +++++ drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 21 +++++++++++++++++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 14 +++++++++++--- 8 files changed, 46 insertions(+), 43 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 1e19c8fd8b82..bac0e44d7634 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -376,7 +376,8 @@ extern const struct stmmac_desc_ops ndesc_ops; /* Specific DMA helpers */ struct stmmac_dma_ops { /* DMA core initialization */ - int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, + int (*reset)(void __iomem *ioaddr); + void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb, int burst_len, u32 dma_tx, u32 dma_rx, int atds); /* Dump DMA registers */ void (*dump_regs) (void __iomem *ioaddr); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 2ec6aeae349e..1657acfa70c2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -95,7 +95,6 @@ #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ #define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */ -#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ #define DMA_BUS_MODE_DEFAULT 0x00000000 /* DMA Control register defines */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 8831a053ac13..9d36ae788429 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -221,7 +221,6 @@ enum inter_frame_gap { /*--- DMA BLOCK defines ---*/ /* DMA Bus Mode register defines */ -#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ #define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 0e8937c1184a..5f0aea56b298 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -30,23 +30,10 @@ #include "dwmac1000.h" #include "dwmac_dma.h" -static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds) +static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx, int atds) { - u32 value = readl(ioaddr + DMA_BUS_MODE); - int limit; - - /* DMA SW reset */ - value |= DMA_BUS_MODE_SFT_RESET; - writel(value, ioaddr + DMA_BUS_MODE); - limit = 10; - while (limit--) { - if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) - break; - mdelay(10); - } - if (limit < 0) - return -EBUSY; + u32 value; /* * Set the DMA PBL (Programmable Burst Length) mode @@ -102,8 +89,6 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, 
int fb, int mb, */ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); - - return 0; } static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz) @@ -205,6 +190,7 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) } const struct stmmac_dma_ops dwmac1000_dma_ops = { + .reset = dwmac_dma_reset, .init = dwmac1000_dma_init, .dump_regs = dwmac1000_dump_dma_regs, .dma_mode = dwmac1000_dma_operation_mode, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index 9d0971c1c2ee..c40582a938a4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -32,24 +32,9 @@ #include "dwmac100.h" #include "dwmac_dma.h" -static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds) +static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, + int burst_len, u32 dma_tx, u32 dma_rx, int atds) { - u32 value = readl(ioaddr + DMA_BUS_MODE); - int limit; - - /* DMA SW reset */ - value |= DMA_BUS_MODE_SFT_RESET; - writel(value, ioaddr + DMA_BUS_MODE); - limit = 10; - while (limit--) { - if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) - break; - mdelay(10); - } - if (limit < 0) - return -EBUSY; - /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), ioaddr + DMA_BUS_MODE); @@ -62,8 +47,6 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, */ writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR); writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR); - - return 0; } /* Store and Forward capability is not used at all. @@ -131,6 +114,7 @@ static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x, } const struct stmmac_dma_ops dwmac100_dma_ops = { + .reset = dwmac_dma_reset, .init = dwmac100_dma_init, .dump_regs = dwmac100_dump_dma_regs, .dma_mode = dwmac100_dma_operation_mode, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index def266da55db..13ca90e23479 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -35,6 +35,10 @@ #define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ #define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ #define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ + +/* SW Reset */ +#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */ + /* Rx watchdog register */ #define DMA_RX_WATCHDOG 0x00001024 /* AXI Bus Mode */ @@ -112,5 +116,6 @@ void dwmac_dma_stop_tx(void __iomem *ioaddr); void dwmac_dma_start_rx(void __iomem *ioaddr); void dwmac_dma_stop_rx(void __iomem *ioaddr); int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); +int dwmac_dma_reset(void __iomem *ioaddr); #endif /* __DWMAC_DMA_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 484e3cf9c414..84e3e84cec7d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c @@ -26,6 +26,27 @@ #define GMAC_HI_REG_AE 0x80000000 +int dwmac_dma_reset(void __iomem *ioaddr) +{ + u32 value = readl(ioaddr + DMA_BUS_MODE); + int limit; + + /* DMA SW reset */ + value |= DMA_BUS_MODE_SFT_RESET; + writel(value, ioaddr + DMA_BUS_MODE); + limit = 10; + while (limit--) { + if (!(readl(ioaddr + 
DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET)) + break; + mdelay(10); + } + + if (limit < 0) + return -EBUSY; + + return 0; +} + /* CSR1 enables the transmit DMA to check for new descriptor */ void dwmac_enable_dma_transmission(void __iomem *ioaddr) { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c21015b68097..13752e933e43 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1638,6 +1638,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; int mixed_burst = 0; int atds = 0; + int ret = 0; if (priv->plat->dma_cfg) { pbl = priv->plat->dma_cfg->pbl; @@ -1649,9 +1650,16 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) atds = 1; - return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, - burst_len, priv->dma_tx_phy, - priv->dma_rx_phy, atds); + ret = priv->hw->dma->reset(priv->ioaddr); + if (ret) { + dev_err(priv->device, "Failed to reset the dma\n"); + return ret; + } + + priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, + burst_len, priv->dma_tx_phy, + priv->dma_rx_phy, atds); + return ret; } /** -- cgit v1.2.3 From afea03656add70a0e00f5b0039f87288c7af8b9f Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:28 +0100 Subject: stmmac: rework DMA bus setting and introduce new platform AXI structure This patch restructures the DMA bus settings by introducing a new platform structure used for programming the AXI Bus Mode Register inside the DMA module. This structure can be populated from device-tree as documented in the binding txt file. After initializing the DMA, the AXI register can optionally be tuned by platform drivers. This patch also reworks some parameters to keep the DMA configuration coherent now that the AXI register is introduced. For example, the burst length is now managed through the AXI support mentioned above, so the snps,burst_len parameter has been removed. It also makes sense to provide the AAL parameter from DT to program Address-Aligned Beats inside Register 0, and to review the PBL settings when initializing the engine. For the PCI glue, looking back at the history of this setting, it was added to align a configuration rather than to fix a known problem; no issue has been raised after this patch, and it is safe to use the default burst length instead of tuning it to the maximum value. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S.
Miller --- Documentation/devicetree/bindings/net/stmmac.txt | 54 ++++++++---- drivers/net/ethernet/stmicro/stmmac/common.h | 5 +- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 2 +- .../net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 99 +++++++++++++++------- drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 2 +- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 34 +++++++- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12 ++- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 4 +- .../net/ethernet/stmicro/stmmac/stmmac_platform.c | 42 ++++++++- include/linux/stmmac.h | 17 +++- 10 files changed, 209 insertions(+), 62 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt index e862a922bd3f..6605d19601c2 100644 --- a/Documentation/devicetree/bindings/net/stmmac.txt +++ b/Documentation/devicetree/bindings/net/stmmac.txt @@ -17,7 +17,25 @@ Required properties: The 1st cell is reset pre-delay in micro seconds. The 2nd cell is reset pulse in micro seconds. The 3rd cell is reset post-delay in micro seconds. + +Optional properties: +- resets: Should contain a phandle to the STMMAC reset signal, if any +- reset-names: Should contain the reset signal name "stmmaceth", if a + reset phandle is given +- max-frame-size: See ethernet.txt file in the same directory +- clocks: If present, the first clock should be the GMAC main clock and + the second clock should be peripheral's register interface clock. Further + clocks may be specified in derived bindings. +- clock-names: One name for each entry in the clocks property, the + first one should be "stmmaceth" and the second one should be "pclk". +- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is + available this clock is used for programming the Timestamp Addend Register. + If not passed then the system clock will be used and this is fine on some + platforms. +- tx-fifo-depth: See ethernet.txt file in the same directory +- rx-fifo-depth: See ethernet.txt file in the same directory - snps,pbl Programmable Burst Length +- snps,aal Address-Aligned Beats - snps,fixed-burst Program the DMA to use the fixed burst mode - snps,mixed-burst Program the DMA to use the mixed burst mode - snps,force_thresh_dma_mode Force DMA to use the threshold mode for @@ -29,27 +47,28 @@ Required properties: supported by this device instance - snps,perfect-filter-entries: Number of perfect filter entries supported by this device instance - -Optional properties: -- resets: Should contain a phandle to the STMMAC reset signal, if any -- reset-names: Should contain the reset signal name "stmmaceth", if a - reset phandle is given -- max-frame-size: See ethernet.txt file in the same directory -- clocks: If present, the first clock should be the GMAC main clock - The optional second clock should be peripheral's register interface clock. - The third optional clock should be the ptp reference clock. - Further clocks may be specified in derived bindings. -- clock-names: One name for each entry in the clocks property. - The first one should be "stmmaceth". - The optional second one should be "pclk". - The optional third one should be "clk_ptp_ref". -- snps,burst_len: The AXI burst lenth value of the AXI BUS MODE register. 
-- tx-fifo-depth: See ethernet.txt file in the same directory -- rx-fifo-depth: See ethernet.txt file in the same directory +- AXI BUS Mode parameters: below the list of all the parameters to program the + AXI register inside the DMA module: + - snps,lpi_en: enable Low Power Interface + - snps,xit_frm: unlock on WoL + - snps,wr_osr_lmt: max write oustanding req. limit + - snps,rd_osr_lmt: max read oustanding req. limit + - snps,kbbe: do not cross 1KiB boundary. + - snps,axi_all: align address + - snps,blen: this is a vector of supported burst length. + - snps,fb: fixed-burst + - snps,mb: mixed-burst + - snps,rb: rebuild INCRx Burst - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus. Examples: + stmmac_axi_setup: stmmac-axi-config { + snps,wr_osr_lmt = <0xf>; + snps,rd_osr_lmt = <0xf>; + snps,blen = <256 128 64 32 0 0 0>; + }; + gmac0: ethernet@e0800000 { compatible = "st,spear600-gmac"; reg = <0xe0800000 0x8000>; @@ -65,6 +84,7 @@ Examples: tx-fifo-depth = <16384>; clocks = <&clock>; clock-names = "stmmaceth"; + snps,axi-config = <&stmmac_axi_setup>; mdio0 { #address-cells = <1>; #size-cells = <0>; diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index bac0e44d7634..586a33624dd2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -27,6 +27,7 @@ #include #include +#include #include #include #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) @@ -378,7 +379,9 @@ struct stmmac_dma_ops { /* DMA core initialization */ int (*reset)(void __iomem *ioaddr); void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds); + int aal, u32 dma_tx, u32 dma_rx, int atds); + /* Configure the AXI Bus Mode Register */ + void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi); /* Dump DMA registers */ void (*dump_regs) (void __iomem *ioaddr); /* Set tx/rx threshold in the csr6 register diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 9d36ae788429..b0593a4268ee 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -240,7 +240,7 @@ enum rx_tx_priority_ratio { #define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ #define DMA_BUS_MODE_RPBL_SHIFT 17 #define DMA_BUS_MODE_USP 0x00800000 -#define DMA_BUS_MODE_PBL 0x01000000 +#define DMA_BUS_MODE_MAXPBL 0x01000000 #define DMA_BUS_MODE_AAL 0x02000000 /* DMA CRS Control and Status Register Mapping */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index 5f0aea56b298..da32d6037e3e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -30,24 +30,76 @@ #include "dwmac1000.h" #include "dwmac_dma.h" +static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi) +{ + u32 value = readl(ioaddr + DMA_AXI_BUS_MODE); + int i; + + pr_info("dwmac1000: Master AXI performs %s burst length\n", + !(value & DMA_AXI_UNDEF) ? 
"fixed" : "any"); + + if (axi->axi_lpi_en) + value |= DMA_AXI_EN_LPI; + if (axi->axi_xit_frm) + value |= DMA_AXI_LPI_XIT_FRM; + + value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) << + DMA_AXI_WR_OSR_LMT_SHIFT; + + value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) << + DMA_AXI_RD_OSR_LMT_SHIFT; + + /* Depending on the UNDEF bit the Master AXI will perform any burst + * length according to the BLEN programmed (by default all BLEN are + * set). + */ + for (i = 0; i < AXI_BLEN; i++) { + switch (axi->axi_blen[i]) { + case 256: + value |= DMA_AXI_BLEN256; + break; + case 128: + value |= DMA_AXI_BLEN128; + break; + case 64: + value |= DMA_AXI_BLEN64; + break; + case 32: + value |= DMA_AXI_BLEN32; + break; + case 16: + value |= DMA_AXI_BLEN16; + break; + case 8: + value |= DMA_AXI_BLEN8; + break; + case 4: + value |= DMA_AXI_BLEN4; + break; + } + } + + writel(value, ioaddr + DMA_AXI_BUS_MODE); +} + static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds) + int aal, u32 dma_tx, u32 dma_rx, int atds) { - u32 value; + u32 value = readl(ioaddr + DMA_BUS_MODE); /* - * Set the DMA PBL (Programmable Burst Length) mode - * Before stmmac core 3.50 this mode bit was 4xPBL, and + * Set the DMA PBL (Programmable Burst Length) mode. + * + * Note: before stmmac core 3.50 this mode bit was 4xPBL, and * post 3.5 mode bit acts as 8*PBL. - * For core rev < 3.5, when the core is set for 4xPBL mode, the - * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats - * depending on pbl value. - * For core rev > 3.5, when the core is set for 8xPBL mode, the - * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats - * depending on pbl value. + * + * This configuration doesn't take care about the Separate PBL + * so only the bits: 13-8 are programmed with the PBL passed from the + * platform. */ - value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) | - (pbl << DMA_BUS_MODE_RPBL_SHIFT)); + value |= DMA_BUS_MODE_MAXPBL; + value &= ~DMA_BUS_MODE_PBL_MASK; + value |= (pbl << DMA_BUS_MODE_PBL_SHIFT); /* Set the Fixed burst mode */ if (fb) @@ -60,26 +112,10 @@ static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, if (atds) value |= DMA_BUS_MODE_ATDS; - writel(value, ioaddr + DMA_BUS_MODE); + if (aal) + value |= DMA_BUS_MODE_AAL; - /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE - * for supported bursts. - * - * Note: This is applicable only for revision GMACv3.61a. For - * older version this register is reserved and shall have no - * effect. - * - * Note: - * For Fixed Burst Mode: if we directly write 0xFF to this - * register using the configurations pass from platform code, - * this would ensure that all bursts supported by core are set - * and those which are not supported would remain ineffective. - * - * For Non Fixed Burst Mode: provide the maximum value of the - * burst length. Any burst equal or below the provided burst - * length would be allowed to perform. 
- */ - writel(burst_len, ioaddr + DMA_AXI_BUS_MODE); + writel(value, ioaddr + DMA_BUS_MODE); /* Mask interrupts by writing to CSR7 */ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); @@ -192,6 +228,7 @@ static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt) const struct stmmac_dma_ops dwmac1000_dma_ops = { .reset = dwmac_dma_reset, .init = dwmac1000_dma_init, + .axi = dwmac1000_dma_axi, .dump_regs = dwmac1000_dump_dma_regs, .dma_mode = dwmac1000_dma_operation_mode, .enable_dma_transmission = dwmac_enable_dma_transmission, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index c40582a938a4..61f54c99a7de 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -33,7 +33,7 @@ #include "dwmac_dma.h" static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb, - int burst_len, u32 dma_tx, u32 dma_rx, int atds) + int aal, u32 dma_tx, u32 dma_rx, int atds) { /* Enable Application Access by writing to DMA CSR0 */ writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 13ca90e23479..726d9d9aaf83 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -41,8 +41,40 @@ /* Rx watchdog register */ #define DMA_RX_WATCHDOG 0x00001024 -/* AXI Bus Mode */ + +/* AXI Master Bus Mode */ #define DMA_AXI_BUS_MODE 0x00001028 + +#define DMA_AXI_EN_LPI BIT(31) +#define DMA_AXI_LPI_XIT_FRM BIT(30) +#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20) +#define DMA_AXI_WR_OSR_LMT_SHIFT 20 +#define DMA_AXI_WR_OSR_LMT_MASK 0xf +#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16) +#define DMA_AXI_RD_OSR_LMT_SHIFT 16 +#define DMA_AXI_RD_OSR_LMT_MASK 0xf + +#define DMA_AXI_OSR_MAX 0xf +#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \ + (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT)) +#define DMA_AXI_1KBBE BIT(13) +#define DMA_AXI_AAL BIT(12) +#define DMA_AXI_BLEN256 BIT(7) +#define DMA_AXI_BLEN128 BIT(6) +#define DMA_AXI_BLEN64 BIT(5) +#define DMA_AXI_BLEN32 BIT(4) +#define DMA_AXI_BLEN16 BIT(3) +#define DMA_AXI_BLEN8 BIT(2) +#define DMA_AXI_BLEN4 BIT(1) +#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \ + DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \ + DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \ + DMA_AXI_BLEN4) + +#define DMA_AXI_UNDEF BIT(0) + +#define DMA_AXI_BURST_LEN_MASK 0x000000FE + #define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ #define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ #define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 13752e933e43..89c26268822e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1635,7 +1635,7 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv) */ static int stmmac_init_dma_engine(struct stmmac_priv *priv) { - int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; + int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0; int mixed_burst = 0; int atds = 0; int ret = 0; @@ -1644,7 +1644,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) pbl = priv->plat->dma_cfg->pbl; fixed_burst = priv->plat->dma_cfg->fixed_burst; mixed_burst = priv->plat->dma_cfg->mixed_burst; - 
burst_len = priv->plat->dma_cfg->burst_len; + aal = priv->plat->dma_cfg->aal; } if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) @@ -1657,8 +1657,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv) } priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, - burst_len, priv->dma_tx_phy, - priv->dma_rx_phy, atds); + aal, priv->dma_tx_phy, priv->dma_rx_phy, atds); + + if ((priv->synopsys_id >= DWMAC_CORE_3_50) && + (priv->plat->axi && priv->hw->dma->axi)) + priv->hw->dma->axi(priv->ioaddr, priv->plat->axi); + return ret; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index d71a721ea61c..ae4388735b7f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c @@ -81,7 +81,7 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat) plat->mdio_bus_data->phy_mask = 0; plat->dma_cfg->pbl = 32; - plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; + /* TODO: AXI */ /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; @@ -115,8 +115,8 @@ static int quark_default_data(struct plat_stmmacenet_data *plat, plat->mdio_bus_data->phy_mask = 0; plat->dma_cfg->pbl = 16; - plat->dma_cfg->burst_len = DMA_AXI_BLEN_256; plat->dma_cfg->fixed_burst = 1; + /* AXI (TODO) */ /* Set default value for multicast hash bins */ plat->multicast_filter_bins = HASH_TABLE_SIZE; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 6a52fa18cbf2..69ccf486d4fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -95,6 +95,42 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries) return x; } +/** + * stmmac_axi_setup - parse DT parameters for programming the AXI register + * @pdev: platform device + * @priv: driver private struct. + * Description: + * if required, from device-tree the AXI internal register can be tuned + * by using platform parameters. 
+ */ +static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) +{ + struct device_node *np; + struct stmmac_axi *axi; + + np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0); + if (!np) + return NULL; + + axi = kzalloc(sizeof(axi), GFP_KERNEL); + if (!axi) + return ERR_PTR(-ENOMEM); + + axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en"); + axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm"); + axi->axi_kbbe = of_property_read_bool(np, "snps,axi_kbbe"); + axi->axi_axi_all = of_property_read_bool(np, "snps,axi_all"); + axi->axi_fb = of_property_read_bool(np, "snps,axi_fb"); + axi->axi_mb = of_property_read_bool(np, "snps,axi_mb"); + axi->axi_rb = of_property_read_bool(np, "snps,axi_rb"); + + of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt); + of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt); + of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN); + + return axi; +} + /** * stmmac_probe_config_dt - parse device-tree driver parameters * @pdev: platform_device structure @@ -216,13 +252,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) } plat->dma_cfg = dma_cfg; of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl); + dma_cfg->aal = of_property_read_bool(np, "snps,aal"); dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst"); dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); - of_property_read_u32(np, "snps,burst_len", &dma_cfg->burst_len); - if (dma_cfg->burst_len < 0 || dma_cfg->burst_len > 256) - dma_cfg->burst_len = 0; } plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); if (plat->force_thresh_dma_mode) { @@ -230,6 +264,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } + plat->axi = stmmac_axi_setup(pdev); + return plat; } #else diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index eead8ab93c0a..6e53fa8942a4 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -90,7 +90,21 @@ struct stmmac_dma_cfg { int pbl; int fixed_burst; int mixed_burst; - int burst_len; + bool aal; +}; + +#define AXI_BLEN 7 +struct stmmac_axi { + bool axi_lpi_en; + bool axi_xit_frm; + u32 axi_wr_osr_lmt; + u32 axi_rd_osr_lmt; + bool axi_kbbe; + bool axi_axi_all; + u32 axi_blen[AXI_BLEN]; + bool axi_fb; + bool axi_mb; + bool axi_rb; }; struct plat_stmmacenet_data { @@ -122,5 +136,6 @@ struct plat_stmmacenet_data { int (*init)(struct platform_device *pdev, void *priv); void (*exit)(struct platform_device *pdev, void *priv); void *bsp_priv; + struct stmmac_axi *axi; }; #endif -- cgit v1.2.3 From 293e4365a1adb1d3747dcb7d12a52950e0e232a2 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:29 +0100 Subject: stmmac: change descriptor layout This patch completely changes the descriptor layout to improve overall performance, since each descriptor is now read only once in the critical paths. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S.
Miller --- drivers/net/ethernet/stmicro/stmmac/descs.h | 330 +++++++++++------------- drivers/net/ethernet/stmicro/stmmac/descs_com.h | 77 +++--- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 190 +++++++------- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 121 +++++---- 4 files changed, 355 insertions(+), 363 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 799c2929c536..2e4c171a2b41 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -1,6 +1,6 @@ /******************************************************************************* - Header File to describe the DMA descriptors. - Enhanced descriptors have been in case of DWMAC1000 Cores. + Header File to describe the DMA descriptors and related definitions. + This is for DWMAC100 and 1000 cores. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -24,198 +24,164 @@ #ifndef __DESCS_H__ #define __DESCS_H__ +#include + +/* Normal receive descriptor defines */ + +/* RDES0 */ +#define RDES0_PAYLOAD_CSUM_ERR BIT(0) +#define RDES0_CRC_ERROR BIT(1) +#define RDES0_DRIBBLING BIT(2) +#define RDES0_MII_ERROR BIT(3) +#define RDES0_RECEIVE_WATCHDOG BIT(4) +#define RDES0_FRAME_TYPE BIT(5) +#define RDES0_COLLISION BIT(6) +#define RDES0_IPC_CSUM_ERROR BIT(7) +#define RDES0_LAST_DESCRIPTOR BIT(8) +#define RDES0_FIRST_DESCRIPTOR BIT(9) +#define RDES0_VLAN_TAG BIT(10) +#define RDES0_OVERFLOW_ERROR BIT(11) +#define RDES0_LENGTH_ERROR BIT(12) +#define RDES0_SA_FILTER_FAIL BIT(13) +#define RDES0_DESCRIPTOR_ERROR BIT(14) +#define RDES0_ERROR_SUMMARY BIT(15) +#define RDES0_FRAME_LEN_MASK GENMASK(29, 16) +#define RDES0_FRAME_LEN_SHIFT 16 +#define RDES0_DA_FILTER_FAIL BIT(30) +#define RDES0_OWN BIT(31) + /* RDES1 */ +#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define RDES1_BUFFER2_SIZE_SHIFT 11 +#define RDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define RDES1_END_RING BIT(25) +#define RDES1_DISABLE_IC BIT(31) + +/* Enhanced receive descriptor defines */ + +/* RDES0 (similar to normal RDES) */ +#define ERDES0_RX_MAC_ADDR BIT(0) + +/* RDES1: completely differ from normal desc definitions */ +#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14) +#define ERDES1_END_RING BIT(15) +#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ERDES1_BUFFER2_SIZE_SHIFT 16 +#define ERDES1_DISABLE_IC BIT(31) + +/* Normal transmit descriptor defines */ +/* TDES0 */ +#define TDES0_DEFERRED BIT(0) +#define TDES0_UNDERFLOW_ERROR BIT(1) +#define TDES0_EXCESSIVE_DEFERRAL BIT(2) +#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define TDES0_VLAN_FRAME BIT(7) +#define TDES0_EXCESSIVE_COLLISIONS BIT(8) +#define TDES0_LATE_COLLISION BIT(9) +#define TDES0_NO_CARRIER BIT(10) +#define TDES0_LOSS_CARRIER BIT(11) +#define TDES0_PAYLOAD_ERROR BIT(12) +#define TDES0_FRAME_FLUSHED BIT(13) +#define TDES0_JABBER_TIMEOUT BIT(14) +#define TDES0_ERROR_SUMMARY BIT(15) +#define TDES0_IP_HEADER_ERROR BIT(16) +#define TDES0_TIME_STAMP_STATUS BIT(17) +#define TDES0_OWN BIT(31) +/* TDES1 */ +#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0) +#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11) +#define TDES1_BUFFER2_SIZE_SHIFT 11 +#define TDES1_TIME_STAMP_ENABLE BIT(22) +#define TDES1_DISABLE_PADDING BIT(23) +#define TDES1_SECOND_ADDRESS_CHAINED BIT(24) +#define TDES1_END_RING BIT(25) 
+#define TDES1_CRC_DISABLE BIT(26) +#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27) +#define TDES1_CHECKSUM_INSERTION_SHIFT 27 +#define TDES1_FIRST_SEGMENT BIT(29) +#define TDES1_LAST_SEGMENT BIT(30) +#define TDES1_INTERRUPT BIT(31) + +/* Enhanced transmit descriptor defines */ +/* TDES0 */ +#define ETDES0_DEFERRED BIT(0) +#define ETDES0_UNDERFLOW_ERROR BIT(1) +#define ETDES0_EXCESSIVE_DEFERRAL BIT(2) +#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3) +#define ETDES0_VLAN_FRAME BIT(7) +#define ETDES0_EXCESSIVE_COLLISIONS BIT(8) +#define ETDES0_LATE_COLLISION BIT(9) +#define ETDES0_NO_CARRIER BIT(10) +#define ETDES0_LOSS_CARRIER BIT(11) +#define ETDES0_PAYLOAD_ERROR BIT(12) +#define ETDES0_FRAME_FLUSHED BIT(13) +#define ETDES0_JABBER_TIMEOUT BIT(14) +#define ETDES0_ERROR_SUMMARY BIT(15) +#define ETDES0_IP_HEADER_ERROR BIT(16) +#define ETDES0_TIME_STAMP_STATUS BIT(17) +#define ETDES0_SECOND_ADDRESS_CHAINED BIT(20) +#define ETDES0_END_RING BIT(21) +#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22) +#define ETDES0_CHECKSUM_INSERTION_SHIFT 22 +#define ETDES0_TIME_STAMP_ENABLE BIT(25) +#define ETDES0_DISABLE_PADDING BIT(26) +#define ETDES0_CRC_DISABLE BIT(27) +#define ETDES0_FIRST_SEGMENT BIT(28) +#define ETDES0_LAST_SEGMENT BIT(29) +#define ETDES0_INTERRUPT BIT(30) +#define ETDES0_OWN BIT(31) +/* TDES1 */ +#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0) +#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16) +#define ETDES1_BUFFER2_SIZE_SHIFT 16 + +/* Extended Receive descriptor definitions */ +#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(2, 6) +#define ERDES4_IP_HDR_ERR BIT(3) +#define ERDES4_IP_PAYLOAD_ERR BIT(4) +#define ERDES4_IP_CSUM_BYPASSED BIT(5) +#define ERDES4_IPV4_PKT_RCVD BIT(6) +#define ERDES4_IPV6_PKT_RCVD BIT(7) +#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8) +#define ERDES4_PTP_FRAME_TYPE BIT(12) +#define ERDES4_PTP_VER BIT(13) +#define ERDES4_TIMESTAMP_DROPPED BIT(14) +#define ERDES4_AV_PKT_RCVD BIT(16) +#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17) +#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18) +#define ERDES4_L3_FILTER_MATCH BIT(24) +#define ERDES4_L4_FILTER_MATCH BIT(25) +#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) + +/* Extended RDES4 message type definitions */ +#define RDES_EXT_NO_PTP 0 +#define RDES_EXT_SYNC 1 +#define RDES_EXT_FOLLOW_UP 2 +#define RDES_EXT_DELAY_REQ 3 +#define RDES_EXT_DELAY_RESP 4 +#define RDES_EXT_PDELAY_REQ 5 +#define RDES_EXT_PDELAY_RESP 6 +#define RDES_EXT_PDELAY_FOLLOW_UP 7 + /* Basic descriptor structure for normal and alternate descriptors */ struct dma_desc { - /* Receive descriptor */ - union { - struct { - /* RDES0 */ - u32 payload_csum_error:1; - u32 crc_error:1; - u32 dribbling:1; - u32 mii_error:1; - u32 receive_watchdog:1; - u32 frame_type:1; - u32 collision:1; - u32 ipc_csum_error:1; - u32 last_descriptor:1; - u32 first_descriptor:1; - u32 vlan_tag:1; - u32 overflow_error:1; - u32 length_error:1; - u32 sa_filter_fail:1; - u32 descriptor_error:1; - u32 error_summary:1; - u32 frame_length:14; - u32 da_filter_fail:1; - u32 own:1; - /* RDES1 */ - u32 buffer1_size:11; - u32 buffer2_size:11; - u32 reserved1:2; - u32 second_address_chained:1; - u32 end_ring:1; - u32 reserved2:5; - u32 disable_ic:1; - - } rx; - struct { - /* RDES0 */ - u32 rx_mac_addr:1; - u32 crc_error:1; - u32 dribbling:1; - u32 error_gmii:1; - u32 receive_watchdog:1; - u32 frame_type:1; - u32 late_collision:1; - u32 ipc_csum_error:1; - u32 last_descriptor:1; - u32 first_descriptor:1; - u32 vlan_tag:1; - u32 overflow_error:1; - u32 length_error:1; - u32 
sa_filter_fail:1; - u32 descriptor_error:1; - u32 error_summary:1; - u32 frame_length:14; - u32 da_filter_fail:1; - u32 own:1; - /* RDES1 */ - u32 buffer1_size:13; - u32 reserved1:1; - u32 second_address_chained:1; - u32 end_ring:1; - u32 buffer2_size:13; - u32 reserved2:2; - u32 disable_ic:1; - } erx; /* -- enhanced -- */ - - /* Transmit descriptor */ - struct { - /* TDES0 */ - u32 deferred:1; - u32 underflow_error:1; - u32 excessive_deferral:1; - u32 collision_count:4; - u32 vlan_frame:1; - u32 excessive_collisions:1; - u32 late_collision:1; - u32 no_carrier:1; - u32 loss_carrier:1; - u32 payload_error:1; - u32 frame_flushed:1; - u32 jabber_timeout:1; - u32 error_summary:1; - u32 ip_header_error:1; - u32 time_stamp_status:1; - u32 reserved1:13; - u32 own:1; - /* TDES1 */ - u32 buffer1_size:11; - u32 buffer2_size:11; - u32 time_stamp_enable:1; - u32 disable_padding:1; - u32 second_address_chained:1; - u32 end_ring:1; - u32 crc_disable:1; - u32 checksum_insertion:2; - u32 first_segment:1; - u32 last_segment:1; - u32 interrupt:1; - } tx; - struct { - /* TDES0 */ - u32 deferred:1; - u32 underflow_error:1; - u32 excessive_deferral:1; - u32 collision_count:4; - u32 vlan_frame:1; - u32 excessive_collisions:1; - u32 late_collision:1; - u32 no_carrier:1; - u32 loss_carrier:1; - u32 payload_error:1; - u32 frame_flushed:1; - u32 jabber_timeout:1; - u32 error_summary:1; - u32 ip_header_error:1; - u32 time_stamp_status:1; - u32 reserved1:2; - u32 second_address_chained:1; - u32 end_ring:1; - u32 checksum_insertion:2; - u32 reserved2:1; - u32 time_stamp_enable:1; - u32 disable_padding:1; - u32 crc_disable:1; - u32 first_segment:1; - u32 last_segment:1; - u32 interrupt:1; - u32 own:1; - /* TDES1 */ - u32 buffer1_size:13; - u32 reserved3:3; - u32 buffer2_size:13; - u32 reserved4:3; - } etx; /* -- enhanced -- */ - - u64 all_flags; - } des01; + unsigned int des0; + unsigned int des1; unsigned int des2; unsigned int des3; }; -/* Extended descriptor structure (supported by new SYNP GMAC generations) */ +/* Extended descriptor structure (e.g. 
>= databook 3.50a) */ struct dma_extended_desc { - struct dma_desc basic; - union { - struct { - u32 ip_payload_type:3; - u32 ip_hdr_err:1; - u32 ip_payload_err:1; - u32 ip_csum_bypassed:1; - u32 ipv4_pkt_rcvd:1; - u32 ipv6_pkt_rcvd:1; - u32 msg_type:4; - u32 ptp_frame_type:1; - u32 ptp_ver:1; - u32 timestamp_dropped:1; - u32 reserved:1; - u32 av_pkt_rcvd:1; - u32 av_tagged_pkt_rcvd:1; - u32 vlan_tag_priority_val:3; - u32 reserved3:3; - u32 l3_filter_match:1; - u32 l4_filter_match:1; - u32 l3_l4_filter_no_match:2; - u32 reserved4:4; - } erx; - struct { - u32 reserved; - } etx; - } des4; + struct dma_desc basic; /* Basic descriptors */ + unsigned int des4; /* Extended Status */ unsigned int des5; /* Reserved */ unsigned int des6; /* Tx/Rx Timestamp Low */ unsigned int des7; /* Tx/Rx Timestamp High */ }; /* Transmit checksum insertion control */ -enum tdes_csum_insertion { - cic_disabled = 0, /* Checksum Insertion Control */ - cic_only_ip = 1, /* Only IP header */ - /* IP header but pseudoheader is not calculated */ - cic_no_pseudoheader = 2, - cic_full = 3, /* IP header and pseudoheader */ -}; - -/* Extended RDES4 definitions */ -#define RDES_EXT_NO_PTP 0 -#define RDES_EXT_SYNC 0x1 -#define RDES_EXT_FOLLOW_UP 0x2 -#define RDES_EXT_DELAY_REQ 0x3 -#define RDES_EXT_DELAY_RESP 0x4 -#define RDES_EXT_PDELAY_REQ 0x5 -#define RDES_EXT_PDELAY_RESP 0x6 -#define RDES_EXT_PDELAY_FOLLOW_UP 0x7 +#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */ #endif /* __DESCS_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h index 6f2cc78c5cf5..7635a464ce41 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -35,100 +35,91 @@ /* Enhanced descriptors */ static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) { - p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; - if (end) - p->des01.erx.end_ring = 1; -} + p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT) + & ERDES1_BUFFER2_SIZE_MASK; -static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end) -{ if (end) - p->des01.etx.end_ring = 1; + p->des1 |= ERDES1_END_RING; } -static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter) +static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end) { - p->des01.etx.end_ring = ter; + if (end) + p->des0 |= ETDES0_END_RING; + else + p->des0 &= ~ETDES0_END_RING; } static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len) { if (unlikely(len > BUF_SIZE_4KiB)) { - p->des01.etx.buffer1_size = BUF_SIZE_4KiB; - p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB; + p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT) + & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB + & ETDES1_BUFFER1_SIZE_MASK); } else - p->des01.etx.buffer1_size = len; + p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK); } /* Normal descriptors */ static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) { - p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1; - if (end) - p->des01.rx.end_ring = 1; -} + p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT) + & RDES1_BUFFER2_SIZE_MASK; -static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end) -{ if (end) - p->des01.tx.end_ring = 1; + p->des1 |= RDES1_END_RING; } -static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter) +static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end) { - p->des01.tx.end_ring = ter; + if (end) + p->des1 |= 
TDES1_END_RING; + else + p->des1 &= ~TDES1_END_RING; } static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len) { if (unlikely(len > BUF_SIZE_2KiB)) { - p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1; - p->des01.etx.buffer2_size = len - p->des01.etx.buffer1_size; + unsigned int buffer1 = (BUF_SIZE_2KiB - 1) + & TDES1_BUFFER1_SIZE_MASK; + p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT) + & TDES1_BUFFER2_SIZE_MASK) | buffer1); } else - p->des01.tx.buffer1_size = len; + p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK); } /* Specific functions used for Chain mode */ /* Enhanced descriptors */ -static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end) -{ - p->des01.erx.second_address_chained = 1; -} - -static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end) +static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p) { - p->des01.etx.second_address_chained = 1; + p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED; } -static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter) +static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p) { - p->des01.etx.second_address_chained = 1; + p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED; } static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len) { - p->des01.etx.buffer1_size = len; + p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK); } /* Normal descriptors */ static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end) { - p->des01.rx.second_address_chained = 1; -} - -static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int ring_size) -{ - p->des01.tx.second_address_chained = 1; + p->des1 |= RDES1_SECOND_ADDRESS_CHAINED; } -static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter) +static inline void ndesc_tx_set_on_chain(struct dma_desc *p) { - p->des01.tx.second_address_chained = 1; + p->des1 |= TDES1_SECOND_ADDRESS_CHAINED; } static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len) { - p->des01.tx.buffer1_size = len; + p->des1 |= len & TDES1_BUFFER1_SIZE_MASK; } #endif /* __DESC_COM_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 7d944449f5ef..716b80740b58 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -1,7 +1,7 @@ /******************************************************************************* This contains the functions to handle the enhanced descriptors. 
- Copyright (C) 2007-2009 STMicroelectronics Ltd + Copyright (C) 2007-2014 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, @@ -29,44 +29,44 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { - int ret = 0; struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = p->des0; + int ret = 0; - if (unlikely(p->des01.etx.error_summary)) { - if (unlikely(p->des01.etx.jabber_timeout)) + if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT)) x->tx_jabber++; - if (unlikely(p->des01.etx.frame_flushed)) { + if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) { x->tx_frame_flushed++; dwmac_dma_flush_tx_fifo(ioaddr); } - if (unlikely(p->des01.etx.loss_carrier)) { + if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) { x->tx_losscarrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.etx.no_carrier)) { + if (unlikely(tdes0 & ETDES0_NO_CARRIER)) { x->tx_carrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.etx.late_collision)) - stats->collisions += p->des01.etx.collision_count; - - if (unlikely(p->des01.etx.excessive_collisions)) - stats->collisions += p->des01.etx.collision_count; + if (unlikely((tdes0 & ETDES0_LATE_COLLISION) || + (tdes0 & ETDES0_EXCESSIVE_COLLISIONS))) + stats->collisions += + (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3; - if (unlikely(p->des01.etx.excessive_deferral)) + if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL)) x->tx_deferred++; - if (unlikely(p->des01.etx.underflow_error)) { + if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) { dwmac_dma_flush_tx_fifo(ioaddr); x->tx_underflow++; } - if (unlikely(p->des01.etx.ip_header_error)) + if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR)) x->tx_ip_header_error++; - if (unlikely(p->des01.etx.payload_error)) { + if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) { x->tx_payload_error++; dwmac_dma_flush_tx_fifo(ioaddr); } @@ -74,11 +74,11 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, ret = -1; } - if (unlikely(p->des01.etx.deferred)) + if (unlikely(tdes0 & ETDES0_DEFERRED)) x->tx_deferred++; #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.etx.vlan_frame) + if (tdes0 & ETDES0_VLAN_FRAME) x->tx_vlan++; #endif @@ -87,7 +87,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, static int enh_desc_get_tx_len(struct dma_desc *p) { - return p->des01.etx.buffer1_size; + return (p->des1 & ETDES1_BUFFER1_SIZE_MASK); } static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) @@ -126,50 +126,55 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, struct dma_extended_desc *p) { - if (unlikely(p->basic.des01.erx.rx_mac_addr)) { - if (p->des4.erx.ip_hdr_err) + unsigned int rdes0 = p->basic.des0; + unsigned int rdes4 = p->des4; + + if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) { + int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8; + + if (rdes4 & ERDES4_IP_HDR_ERR) x->ip_hdr_err++; - if (p->des4.erx.ip_payload_err) + if (rdes4 & ERDES4_IP_PAYLOAD_ERR) x->ip_payload_err++; - if (p->des4.erx.ip_csum_bypassed) + if (rdes4 & ERDES4_IP_CSUM_BYPASSED) x->ip_csum_bypassed++; - if (p->des4.erx.ipv4_pkt_rcvd) + if (rdes4 & ERDES4_IPV4_PKT_RCVD) x->ipv4_pkt_rcvd++; - if (p->des4.erx.ipv6_pkt_rcvd) + if (rdes4 & ERDES4_IPV6_PKT_RCVD) x->ipv6_pkt_rcvd++; - if 
(p->des4.erx.msg_type == RDES_EXT_SYNC) + if (message_type == RDES_EXT_SYNC) x->rx_msg_type_sync++; - else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP) + else if (message_type == RDES_EXT_FOLLOW_UP) x->rx_msg_type_follow_up++; - else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) + else if (message_type == RDES_EXT_DELAY_REQ) x->rx_msg_type_delay_req++; - else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) + else if (message_type == RDES_EXT_DELAY_RESP) x->rx_msg_type_delay_resp++; - else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ) + else if (message_type == RDES_EXT_PDELAY_REQ) x->rx_msg_type_pdelay_req++; - else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) + else if (message_type == RDES_EXT_PDELAY_RESP) x->rx_msg_type_pdelay_resp++; - else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP) + else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) x->rx_msg_type_pdelay_follow_up++; else x->rx_msg_type_ext_no_ptp++; - if (p->des4.erx.ptp_frame_type) + if (rdes4 & ERDES4_PTP_FRAME_TYPE) x->ptp_frame_type++; - if (p->des4.erx.ptp_ver) + if (rdes4 & ERDES4_PTP_VER) x->ptp_ver++; - if (p->des4.erx.timestamp_dropped) + if (rdes4 & ERDES4_TIMESTAMP_DROPPED) x->timestamp_dropped++; - if (p->des4.erx.av_pkt_rcvd) + if (rdes4 & ERDES4_AV_PKT_RCVD) x->av_pkt_rcvd++; - if (p->des4.erx.av_tagged_pkt_rcvd) + if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD) x->av_tagged_pkt_rcvd++; - if (p->des4.erx.vlan_tag_priority_val) + if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18) x->vlan_tag_priority_val++; - if (p->des4.erx.l3_filter_match) + if (rdes4 & ERDES4_L3_FILTER_MATCH) x->l3_filter_match++; - if (p->des4.erx.l4_filter_match) + if (rdes4 & ERDES4_L4_FILTER_MATCH) x->l4_filter_match++; - if (p->des4.erx.l3_l4_filter_no_match) + if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26) x->l3_l4_filter_no_match++; } } @@ -177,30 +182,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p) { - int ret = good_frame; struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int rdes0 = p->des0; + int ret = good_frame; - if (unlikely(p->des01.erx.error_summary)) { - if (unlikely(p->des01.erx.descriptor_error)) { + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; stats->rx_length_errors++; } - if (unlikely(p->des01.erx.overflow_error)) + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) x->rx_gmac_overflow++; - if (unlikely(p->des01.erx.ipc_csum_error)) + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) pr_err("\tIPC Csum Error/Giant frame\n"); - if (unlikely(p->des01.erx.late_collision)) { + if (unlikely(rdes0 & RDES0_COLLISION)) stats->collisions++; - } - if (unlikely(p->des01.erx.receive_watchdog)) + if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG)) x->rx_watchdog++; - if (unlikely(p->des01.erx.error_gmii)) + if (unlikely(rdes0 & RDES0_MII_ERROR)) /* GMII */ x->rx_mii++; - if (unlikely(p->des01.erx.crc_error)) { + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { x->rx_crc++; stats->rx_crc_errors++; } @@ -211,26 +216,27 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, * It doesn't match with the information reported into the databook. * At any rate, we need to understand if the CSUM hw computation is ok * and report this info to the upper layers. 
*/ - ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, - p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); + ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), + !!(rdes0 & RDES0_FRAME_TYPE), + !!(rdes0 & ERDES0_RX_MAC_ADDR)); - if (unlikely(p->des01.erx.dribbling)) + if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; - if (unlikely(p->des01.erx.sa_filter_fail)) { + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) { x->sa_rx_filter_fail++; ret = discard_frame; } - if (unlikely(p->des01.erx.da_filter_fail)) { + if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) { x->da_rx_filter_fail++; ret = discard_frame; } - if (unlikely(p->des01.erx.length_error)) { + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { x->rx_length++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.erx.vlan_tag) + if (rdes0 & RDES0_VLAN_TAG) x->rx_vlan++; #endif @@ -240,60 +246,59 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des01.all_flags = 0; - p->des01.erx.own = 1; - p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + p->des0 |= RDES0_OWN; + p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK); if (mode == STMMAC_CHAIN_MODE) - ehn_desc_rx_set_on_chain(p, end); + ehn_desc_rx_set_on_chain(p); else ehn_desc_rx_set_on_ring(p, end); if (disable_rx_ic) - p->des01.erx.disable_ic = 1; + p->des1 |= ERDES1_DISABLE_IC; } static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des01.all_flags = 0; + p->des0 &= ~ETDES0_OWN; if (mode == STMMAC_CHAIN_MODE) - ehn_desc_tx_set_on_chain(p, end); + enh_desc_end_tx_desc_on_chain(p); else - ehn_desc_tx_set_on_ring(p, end); + enh_desc_end_tx_desc_on_ring(p, end); } static int enh_desc_get_tx_owner(struct dma_desc *p) { - return p->des01.etx.own; + return (p->des0 & ETDES0_OWN) >> 31; } static int enh_desc_get_rx_owner(struct dma_desc *p) { - return p->des01.erx.own; + return (p->des0 & RDES0_OWN) >> 31; } static void enh_desc_set_tx_owner(struct dma_desc *p) { - p->des01.etx.own = 1; + p->des0 |= ETDES0_OWN; } static void enh_desc_set_rx_owner(struct dma_desc *p) { - p->des01.erx.own = 1; + p->des0 |= RDES0_OWN; } static int enh_desc_get_tx_ls(struct dma_desc *p) { - return p->des01.etx.last_segment; + return (p->des0 & ETDES0_LAST_SEGMENT) >> 29; } static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) { - int ter = p->des01.etx.end_ring; + int ter = (p->des0 & ETDES0_END_RING) >> 21; memset(p, 0, offsetof(struct dma_desc, des2)); if (mode == STMMAC_CHAIN_MODE) - enh_desc_end_tx_desc_on_chain(p, ter); + enh_desc_end_tx_desc_on_chain(p); else enh_desc_end_tx_desc_on_ring(p, ter); } @@ -301,49 +306,60 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, int csum_flag, int mode) { - p->des01.etx.first_segment = is_fs; + unsigned int tdes0 = p->des0; + + if (is_fs) + tdes0 |= ETDES0_FIRST_SEGMENT; + else + tdes0 &= ~ETDES0_FIRST_SEGMENT; + + if (likely(csum_flag)) + tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + else + tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + + p->des0 = tdes0; if (mode == STMMAC_CHAIN_MODE) enh_set_tx_desc_len_on_chain(p, len); else enh_set_tx_desc_len_on_ring(p, len); - - if (likely(csum_flag)) - p->des01.etx.checksum_insertion = cic_full; } static void enh_desc_clear_tx_ic(struct dma_desc *p) { - p->des01.etx.interrupt = 0; + p->des0 &= ~ETDES0_INTERRUPT; 
} static void enh_desc_close_tx_desc(struct dma_desc *p) { - p->des01.etx.last_segment = 1; - p->des01.etx.interrupt = 1; + p->des0 |= ETDES0_LAST_SEGMENT | ETDES0_INTERRUPT; } static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { + unsigned int csum = 0; /* The type-1 checksum offload engines append the checksum at * the end of frame and the two bytes of checksum are added in * the length. * Adjust for that in the framelen for type-1 checksum offload - * engines. */ + * engines. + */ if (rx_coe_type == STMMAC_RX_COE_TYPE1) - return p->des01.erx.frame_length - 2; - else - return p->des01.erx.frame_length; + csum = 2; + + return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - + csum); } static void enh_desc_enable_tx_timestamp(struct dma_desc *p) { - p->des01.etx.time_stamp_enable = 1; + p->des0 |= ETDES0_TIME_STAMP_ENABLE; } static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) { - return p->des01.etx.time_stamp_status; + return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17; } static u64 enh_desc_get_timestamp(void *desc, u32 ats) @@ -368,7 +384,7 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) { if (ats) { struct dma_extended_desc *p = (struct dma_extended_desc *)desc; - return p->basic.des01.erx.ipc_csum_error; + return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7; } else { struct dma_desc *p = (struct dma_desc *)desc; if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 48c3456445b2..460c573dd3e1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -29,33 +29,38 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p, void __iomem *ioaddr) { - int ret = 0; struct net_device_stats *stats = (struct net_device_stats *)data; + unsigned int tdes0 = p->des0; + int ret = 0; - if (unlikely(p->des01.tx.error_summary)) { - if (unlikely(p->des01.tx.underflow_error)) { + if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) { + if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) { x->tx_underflow++; stats->tx_fifo_errors++; } - if (unlikely(p->des01.tx.no_carrier)) { + if (unlikely(tdes0 & TDES0_NO_CARRIER)) { x->tx_carrier++; stats->tx_carrier_errors++; } - if (unlikely(p->des01.tx.loss_carrier)) { + if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) { x->tx_losscarrier++; stats->tx_carrier_errors++; } - if (unlikely((p->des01.tx.excessive_deferral) || - (p->des01.tx.excessive_collisions) || - (p->des01.tx.late_collision))) - stats->collisions += p->des01.tx.collision_count; + if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) || + (tdes0 & TDES0_EXCESSIVE_COLLISIONS) || + (tdes0 & TDES0_LATE_COLLISION))) { + unsigned int collisions; + + collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3; + stats->collisions += collisions; + } ret = -1; } - if (p->des01.etx.vlan_frame) + if (tdes0 & TDES0_VLAN_FRAME) x->tx_vlan++; - if (unlikely(p->des01.tx.deferred)) + if (unlikely(tdes0 & TDES0_DEFERRED)) x->tx_deferred++; return ret; @@ -63,7 +68,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, static int ndesc_get_tx_len(struct dma_desc *p) { - return p->des01.tx.buffer1_size; + return (p->des1 & RDES1_BUFFER1_SIZE_MASK); } /* This function verifies if each incoming frame has some errors @@ -74,47 +79,48 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, struct dma_desc *p) { int ret = good_frame; + 
unsigned int rdes0 = p->des0; struct net_device_stats *stats = (struct net_device_stats *)data; - if (unlikely(p->des01.rx.last_descriptor == 0)) { + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { pr_warn("%s: Oversized frame spanned multiple buffers\n", __func__); stats->rx_length_errors++; return discard_frame; } - if (unlikely(p->des01.rx.error_summary)) { - if (unlikely(p->des01.rx.descriptor_error)) + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { + if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) x->rx_desc++; - if (unlikely(p->des01.rx.sa_filter_fail)) + if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) x->sa_filter_fail++; - if (unlikely(p->des01.rx.overflow_error)) + if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR)) x->overflow_error++; - if (unlikely(p->des01.rx.ipc_csum_error)) + if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR)) x->ipc_csum_error++; - if (unlikely(p->des01.rx.collision)) { + if (unlikely(rdes0 & RDES0_COLLISION)) { x->rx_collision++; stats->collisions++; } - if (unlikely(p->des01.rx.crc_error)) { + if (unlikely(rdes0 & RDES0_CRC_ERROR)) { x->rx_crc++; stats->rx_crc_errors++; } ret = discard_frame; } - if (unlikely(p->des01.rx.dribbling)) + if (unlikely(rdes0 & RDES0_DRIBBLING)) x->dribbling_bit++; - if (unlikely(p->des01.rx.length_error)) { + if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) { x->rx_length++; ret = discard_frame; } - if (unlikely(p->des01.rx.mii_error)) { + if (unlikely(rdes0 & RDES0_MII_ERROR)) { x->rx_mii++; ret = discard_frame; } #ifdef STMMAC_VLAN_TAG_USED - if (p->des01.rx.vlan_tag) + if (rdes0 & RDES0_VLAN_TAG) x->vlan_tag++; #endif return ret; @@ -123,9 +129,8 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, int end) { - p->des01.all_flags = 0; - p->des01.rx.own = 1; - p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + p->des0 |= RDES0_OWN; + p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK; if (mode == STMMAC_CHAIN_MODE) ndesc_rx_set_on_chain(p, end); @@ -133,50 +138,50 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, ndesc_rx_set_on_ring(p, end); if (disable_rx_ic) - p->des01.rx.disable_ic = 1; + p->des1 |= RDES1_DISABLE_IC; } static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) { - p->des01.all_flags = 0; + p->des0 &= ~TDES0_OWN; if (mode == STMMAC_CHAIN_MODE) - ndesc_tx_set_on_chain(p, end); + ndesc_tx_set_on_chain(p); else - ndesc_tx_set_on_ring(p, end); + ndesc_end_tx_desc_on_ring(p, end); } static int ndesc_get_tx_owner(struct dma_desc *p) { - return p->des01.tx.own; + return (p->des0 & TDES0_OWN) >> 31; } static int ndesc_get_rx_owner(struct dma_desc *p) { - return p->des01.rx.own; + return (p->des0 & RDES0_OWN) >> 31; } static void ndesc_set_tx_owner(struct dma_desc *p) { - p->des01.tx.own = 1; + p->des0 |= TDES0_OWN; } static void ndesc_set_rx_owner(struct dma_desc *p) { - p->des01.rx.own = 1; + p->des0 |= RDES0_OWN; } static int ndesc_get_tx_ls(struct dma_desc *p) { - return p->des01.tx.last_segment; + return (p->des1 & TDES1_LAST_SEGMENT) >> 30; } static void ndesc_release_tx_desc(struct dma_desc *p, int mode) { - int ter = p->des01.tx.end_ring; + int ter = (p->des1 & TDES1_END_RING) >> 25; memset(p, 0, offsetof(struct dma_desc, des2)); if (mode == STMMAC_CHAIN_MODE) - ndesc_end_tx_desc_on_chain(p, ter); + ndesc_tx_set_on_chain(p); else ndesc_end_tx_desc_on_ring(p, ter); } @@ -184,48 +189,62 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode) static void 
ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, int csum_flag, int mode) { - p->des01.tx.first_segment = is_fs; + unsigned int tdes1 = p->des1; + + if (is_fs) + tdes1 |= TDES1_FIRST_SEGMENT; + else + tdes1 &= ~TDES1_FIRST_SEGMENT; + + if (likely(csum_flag)) + tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT; + else + tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT); + + p->des1 = tdes1; + if (mode == STMMAC_CHAIN_MODE) norm_set_tx_desc_len_on_chain(p, len); else norm_set_tx_desc_len_on_ring(p, len); - - if (likely(csum_flag)) - p->des01.tx.checksum_insertion = cic_full; } static void ndesc_clear_tx_ic(struct dma_desc *p) { - p->des01.tx.interrupt = 0; + p->des1 &= ~TDES1_INTERRUPT; } static void ndesc_close_tx_desc(struct dma_desc *p) { - p->des01.tx.last_segment = 1; - p->des01.tx.interrupt = 1; + p->des1 |= TDES1_LAST_SEGMENT | TDES1_INTERRUPT; } static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { + unsigned int csum = 0; + /* The type-1 checksum offload engines append the checksum at * the end of frame and the two bytes of checksum are added in * the length. * Adjust for that in the framelen for type-1 checksum offload - * engines. */ + * engines + */ if (rx_coe_type == STMMAC_RX_COE_TYPE1) - return p->des01.rx.frame_length - 2; - else - return p->des01.rx.frame_length; + csum = 2; + + return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) - + csum); + } static void ndesc_enable_tx_timestamp(struct dma_desc *p) { - p->des01.tx.time_stamp_enable = 1; + p->des1 |= TDES1_TIME_STAMP_ENABLE; } static int ndesc_get_tx_timestamp_status(struct dma_desc *p) { - return p->des01.tx.time_stamp_status; + return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17; } static u64 ndesc_get_timestamp(void *desc, u32 ats) -- cgit v1.2.3 From e3ad57c96715df2989ce6c18e58faf2913b305cb Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:30 +0100 Subject: stmmac: review RX/TX ring management This patch is to rework the ring management now optimized. The indexes into the ring buffer are always incremented, and the entry is accessed via doing a modulo to find the "real" position in the ring. It is inefficient, modulo is an expensive operation. The formula [(entry + 1) & (size - 1)] is now adopted on a ring that is power-of-2 in size. Then, the number of elements cannot be set by command line but it is fixed. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 17 +- drivers/net/ethernet/stmicro/stmmac/common.h | 4 + drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 7 +- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 198 ++++++++++------------ 5 files changed, 111 insertions(+), 117 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index cf28daba4346..2763772ef1d4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -31,8 +31,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; - unsigned int txsize = priv->dma_tx_size; - unsigned int entry = priv->cur_tx % txsize; + unsigned int entry = priv->cur_tx; struct dma_desc *desc = priv->dma_tx + entry; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax; @@ -54,7 +53,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) while (len != 0) { priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); desc = priv->dma_tx + entry; if (len > bmax) { @@ -82,6 +81,9 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) len = 0; } } + + priv->cur_tx = entry; + return entry; } @@ -138,7 +140,7 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) */ p->des3 = (unsigned int)(priv->dma_rx_phy + (((priv->dirty_rx) + 1) % - priv->dma_rx_size) * + DMA_RX_SIZE) * sizeof(struct dma_desc)); } @@ -151,10 +153,9 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. 
*/ - p->des3 = (unsigned int)(priv->dma_tx_phy + - (((priv->dirty_tx + 1) % - priv->dma_tx_size) * - sizeof(struct dma_desc))); + p->des3 = (unsigned int)((priv->dma_tx_phy + + ((priv->dirty_tx + 1) % DMA_TX_SIZE)) + * sizeof(struct dma_desc)); } const struct stmmac_mode_ops chain_mode_ops = { diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 586a33624dd2..09291af19039 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -42,6 +42,10 @@ #define DWMAC_CORE_3_40 0x34 #define DWMAC_CORE_3_50 0x35 +#define DMA_TX_SIZE 512 +#define DMA_RX_SIZE 512 +#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1)) + #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 5dd50c6cda5b..4358a87fbc6e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -31,8 +31,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; - unsigned int txsize = priv->dma_tx_size; - unsigned int entry = priv->cur_tx % txsize; + unsigned int entry = priv->cur_tx; struct dma_desc *desc; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax, len; @@ -62,7 +61,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) STMMAC_RING_MODE); wmb(); priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); if (priv->extend_desc) desc = (struct dma_desc *)(priv->dma_etx + entry); @@ -90,6 +89,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) STMMAC_RING_MODE); } + priv->cur_tx = entry; + return entry; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 1f3b33a6c6a8..7ae7c6430a3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -54,7 +54,6 @@ struct stmmac_priv { struct sk_buff **tx_skbuff; unsigned int cur_tx; unsigned int dirty_tx; - unsigned int dma_tx_size; u32 tx_count_frames; u32 tx_coal_frames; u32 tx_coal_timer; @@ -71,7 +70,6 @@ struct stmmac_priv { struct sk_buff **rx_skbuff; unsigned int cur_rx; unsigned int dirty_rx; - unsigned int dma_rx_size; unsigned int dma_buf_sz; u32 rx_riwt; int hwts_rx_en; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 89c26268822e..eb555f0fe815 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -71,15 +71,7 @@ static int phyaddr = -1; module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); -#define DMA_TX_SIZE 256 -static int dma_txsize = DMA_TX_SIZE; -module_param(dma_txsize, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list"); - -#define DMA_RX_SIZE 256 -static int dma_rxsize = DMA_RX_SIZE; -module_param(dma_rxsize, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list"); +#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) static int flow_ctrl = FLOW_OFF; module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); @@ -134,10 +126,6 @@ static void stmmac_verify_args(void) { if (unlikely(watchdog < 0)) watchdog = TX_TIMEO; - if (unlikely(dma_rxsize < 0)) - dma_rxsize = 
DMA_RX_SIZE; - if (unlikely(dma_txsize < 0)) - dma_txsize = DMA_TX_SIZE; if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) buf_sz = DEFAULT_BUFSIZE; if (unlikely(flow_ctrl > 1)) @@ -197,12 +185,28 @@ static void print_pkt(unsigned char *buf, int len) print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); } -/* minimum number of free TX descriptors required to wake up TX process */ -#define STMMAC_TX_THRESH(x) (x->dma_tx_size/4) - static inline u32 stmmac_tx_avail(struct stmmac_priv *priv) { - return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; + unsigned avail; + + if (priv->dirty_tx > priv->cur_tx) + avail = priv->dirty_tx - priv->cur_tx - 1; + else + avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1; + + return avail; +} + +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv) +{ + unsigned dirty; + + if (priv->dirty_rx <= priv->cur_rx) + dirty = priv->cur_rx - priv->dirty_rx; + else + dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx; + + return dirty; } /** @@ -906,19 +910,16 @@ static void stmmac_display_ring(void *head, int size, int extend_desc) static void stmmac_display_rings(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; - if (priv->extend_desc) { pr_info("Extended RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1); pr_info("Extended TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1); } else { pr_info("RX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0); pr_info("TX descriptor ring:\n"); - stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0); } } @@ -947,28 +948,26 @@ static int stmmac_set_bfsize(int mtu, int bufsize) static void stmmac_clear_descriptors(struct stmmac_priv *priv) { int i; - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; /* Clear the Rx/Tx descriptors */ - for (i = 0; i < rxsize; i++) + for (i = 0; i < DMA_RX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic, priv->use_riwt, priv->mode, - (i == rxsize - 1)); + (i == DMA_RX_SIZE - 1)); else priv->hw->desc->init_rx_desc(&priv->dma_rx[i], priv->use_riwt, priv->mode, - (i == rxsize - 1)); - for (i = 0; i < txsize; i++) + (i == DMA_RX_SIZE - 1)); + for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); else priv->hw->desc->init_tx_desc(&priv->dma_tx[i], priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); } /** @@ -1031,8 +1030,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) { int i; struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; unsigned int bfsize = 0; int ret = -ENOMEM; @@ -1044,10 +1041,6 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) priv->dma_buf_sz = bfsize; - if (netif_msg_probe(priv)) - pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__, - txsize, rxsize, bfsize); - if (netif_msg_probe(priv)) { pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); @@ -1055,7 +1048,7 @@ static int 
init_dma_desc_rings(struct net_device *dev, gfp_t flags) /* RX INITIALIZATION */ pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n"); } - for (i = 0; i < rxsize; i++) { + for (i = 0; i < DMA_RX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((priv->dma_erx + i)->basic); @@ -1072,26 +1065,26 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) (unsigned int)priv->rx_skbuff_dma[i]); } priv->cur_rx = 0; - priv->dirty_rx = (unsigned int)(i - rxsize); + priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE); buf_sz = bfsize; /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) { priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, - rxsize, 1); + DMA_RX_SIZE, 1); priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, - txsize, 1); + DMA_TX_SIZE, 1); } else { priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, - rxsize, 0); + DMA_RX_SIZE, 0); priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, - txsize, 0); + DMA_TX_SIZE, 0); } } /* TX INITIALIZATION */ - for (i = 0; i < txsize; i++) { + for (i = 0; i < DMA_TX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) p = &((priv->dma_etx + i)->basic); @@ -1123,7 +1116,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_rx_size; i++) + for (i = 0; i < DMA_RX_SIZE; i++) stmmac_free_rx_buffers(priv, i); } @@ -1131,7 +1124,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) { int i; - for (i = 0; i < priv->dma_tx_size; i++) { + for (i = 0; i < DMA_TX_SIZE; i++) { struct dma_desc *p; if (priv->extend_desc) @@ -1171,33 +1164,31 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) */ static int alloc_dma_desc_resources(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; int ret = -ENOMEM; - priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), + priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t), GFP_KERNEL); if (!priv->rx_skbuff_dma) return -ENOMEM; - priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), + priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->rx_skbuff) goto err_rx_skbuff; - priv->tx_skbuff_dma = kmalloc_array(txsize, + priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE, sizeof(*priv->tx_skbuff_dma), GFP_KERNEL); if (!priv->tx_skbuff_dma) goto err_tx_skbuff_dma; - priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), + priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!priv->tx_skbuff) goto err_tx_skbuff; if (priv->extend_desc) { - priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize * + priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), &priv->dma_rx_phy, @@ -1205,31 +1196,31 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) if (!priv->dma_erx) goto err_dma; - priv->dma_etx = dma_zalloc_coherent(priv->device, txsize * + priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_extended_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_etx) { - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), priv->dma_erx, priv->dma_rx_phy); goto err_dma; } } else { - priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize * + priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_desc), &priv->dma_rx_phy, 
GFP_KERNEL); if (!priv->dma_rx) goto err_dma; - priv->dma_tx = dma_zalloc_coherent(priv->device, txsize * + priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_desc), &priv->dma_tx_phy, GFP_KERNEL); if (!priv->dma_tx) { - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); goto err_dma; @@ -1258,16 +1249,16 @@ static void free_dma_desc_resources(struct stmmac_priv *priv) /* Free DMA regions of consistent memory previously allocated */ if (!priv->extend_desc) { dma_free_coherent(priv->device, - priv->dma_tx_size * sizeof(struct dma_desc), + DMA_TX_SIZE * sizeof(struct dma_desc), priv->dma_tx, priv->dma_tx_phy); dma_free_coherent(priv->device, - priv->dma_rx_size * sizeof(struct dma_desc), + DMA_RX_SIZE * sizeof(struct dma_desc), priv->dma_rx, priv->dma_rx_phy); } else { - dma_free_coherent(priv->device, priv->dma_tx_size * + dma_free_coherent(priv->device, DMA_TX_SIZE * sizeof(struct dma_extended_desc), priv->dma_etx, priv->dma_tx_phy); - dma_free_coherent(priv->device, priv->dma_rx_size * + dma_free_coherent(priv->device, DMA_RX_SIZE * sizeof(struct dma_extended_desc), priv->dma_erx, priv->dma_rx_phy); } @@ -1312,16 +1303,15 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv) */ static void stmmac_tx_clean(struct stmmac_priv *priv) { - unsigned int txsize = priv->dma_tx_size; unsigned int bytes_compl = 0, pkts_compl = 0; + unsigned int entry = priv->dirty_tx; spin_lock(&priv->tx_lock); priv->xstats.tx_clean++; - while (priv->dirty_tx != priv->cur_tx) { + while (entry != priv->cur_tx) { int last; - unsigned int entry = priv->dirty_tx % txsize; struct sk_buff *skb = priv->tx_skbuff[entry]; struct dma_desc *p; @@ -1378,16 +1368,17 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->hw->desc->release_tx_desc(p, priv->mode); - priv->dirty_tx++; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + priv->dirty_tx = entry; } netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); if (unlikely(netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) { + stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) { netif_tx_lock(priv->dev); if (netif_queue_stopped(priv->dev) && - stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) { + stmmac_tx_avail(priv) > STMMAC_TX_THRESH) { if (netif_msg_tx_done(priv)) pr_debug("%s: restart transmit\n", __func__); netif_wake_queue(priv->dev); @@ -1421,20 +1412,19 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv) static void stmmac_tx_err(struct stmmac_priv *priv) { int i; - int txsize = priv->dma_tx_size; netif_stop_queue(priv->dev); priv->hw->dma->stop_tx(priv->ioaddr); dma_free_tx_skbufs(priv); - for (i = 0; i < txsize; i++) + for (i = 0; i < DMA_TX_SIZE; i++) if (priv->extend_desc) priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic, priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); else priv->hw->desc->init_tx_desc(&priv->dma_tx[i], priv->mode, - (i == txsize - 1)); + (i == DMA_TX_SIZE - 1)); priv->dirty_tx = 0; priv->cur_tx = 0; netdev_reset_queue(priv->dev); @@ -1811,9 +1801,6 @@ static int stmmac_open(struct net_device *dev) memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); priv->xstats.threshold = tc; - /* Create and initialize the TX/RX descriptors chains. 
*/ - priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); - priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); ret = alloc_dma_desc_resources(priv); @@ -1955,7 +1942,6 @@ static int stmmac_release(struct net_device *dev) static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; int entry; int i, csum_insertion = 0, is_jumbo = 0; int nfrags = skb_shinfo(skb)->nr_frags; @@ -1978,7 +1964,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (priv->tx_path_in_lpi_mode) stmmac_disable_eee_mode(priv); - entry = priv->cur_tx % txsize; + entry = priv->cur_tx; + csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); @@ -2013,7 +2000,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int len = skb_frag_size(frag); priv->tx_skbuff[entry] = NULL; - entry = (++priv->cur_tx) % txsize; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + if (priv->extend_desc) desc = (struct dma_desc *)(priv->dma_etx + entry); else @@ -2056,17 +2044,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->hw->desc->set_tx_owner(first); wmb(); - priv->cur_tx++; + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); + + priv->cur_tx = entry; if (netif_msg_pktdata(priv)) { pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", - __func__, (priv->cur_tx % txsize), - (priv->dirty_tx % txsize), entry, first, nfrags); + __func__, (priv->cur_tx % DMA_TX_SIZE), + (priv->dirty_tx % DMA_TX_SIZE), entry, first, nfrags); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_etx, txsize, 1); + stmmac_display_ring((void *)priv->dma_etx, + DMA_TX_SIZE, 1); else - stmmac_display_ring((void *)priv->dma_tx, txsize, 0); + stmmac_display_ring((void *)priv->dma_tx, + DMA_TX_SIZE, 0); pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); @@ -2128,11 +2120,11 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) */ static inline void stmmac_rx_refill(struct stmmac_priv *priv) { - unsigned int rxsize = priv->dma_rx_size; int bfsize = priv->dma_buf_sz; + unsigned int entry = priv->dirty_rx; + int dirty = stmmac_rx_dirty(priv); - for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { - unsigned int entry = priv->dirty_rx % rxsize; + while (dirty-- > 0) { struct dma_desc *p; if (priv->extend_desc) @@ -2168,7 +2160,10 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) wmb(); priv->hw->desc->set_rx_owner(p); wmb(); + + entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE); } + priv->dirty_rx = entry; } /** @@ -2180,8 +2175,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) */ static int stmmac_rx(struct stmmac_priv *priv, int limit) { - unsigned int rxsize = priv->dma_rx_size; - unsigned int entry = priv->cur_rx % rxsize; + unsigned int entry = priv->cur_rx; unsigned int next_entry; unsigned int count = 0; int coe = priv->hw->rx_csum; @@ -2189,9 +2183,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) if (netif_msg_rx_status(priv)) { pr_debug("%s: descriptor ring:\n", __func__); if (priv->extend_desc) - stmmac_display_ring((void *)priv->dma_erx, rxsize, 1); + stmmac_display_ring((void *)priv->dma_erx, + DMA_RX_SIZE, 1); else - stmmac_display_ring((void *)priv->dma_rx, rxsize, 0); + stmmac_display_ring((void *)priv->dma_rx, + DMA_RX_SIZE, 0); } while (count < limit) { int status; @@ -2207,7 +2203,9 @@ static int stmmac_rx(struct 
stmmac_priv *priv, int limit) count++; - next_entry = (++priv->cur_rx) % rxsize; + priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE); + next_entry = priv->cur_rx; + if (priv->extend_desc) prefetch(priv->dma_erx + next_entry); else @@ -2567,19 +2565,17 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); - unsigned int txsize = priv->dma_tx_size; - unsigned int rxsize = priv->dma_rx_size; if (priv->extend_desc) { seq_printf(seq, "Extended RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq); + sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq); seq_printf(seq, "Extended TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq); + sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq); } else { seq_printf(seq, "RX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq); + sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq); seq_printf(seq, "TX descriptor ring:\n"); - sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq); + sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq); } return 0; @@ -3149,12 +3145,6 @@ static int __init stmmac_cmdline_opt(char *str) } else if (!strncmp(opt, "phyaddr:", 8)) { if (kstrtoint(opt + 8, 0, &phyaddr)) goto err; - } else if (!strncmp(opt, "dma_txsize:", 11)) { - if (kstrtoint(opt + 11, 0, &dma_txsize)) - goto err; - } else if (!strncmp(opt, "dma_rxsize:", 11)) { - if (kstrtoint(opt + 11, 0, &dma_rxsize)) - goto err; } else if (!strncmp(opt, "buf_sz:", 7)) { if (kstrtoint(opt + 7, 0, &buf_sz)) goto err; -- cgit v1.2.3 From 553e2ab3130e9c3d234bee0c80a2a1e5057c8f9a Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:31 +0100 Subject: stmmac: add length field to dma data Currently, the code pulls out the length field when unmapping a buffer directly from the descriptor. This will result in an uncached read to a dma_alloc_coherent() region. There is no need to do this, so this patch simply puts the value directly into a data structure which will hit the cache. Signed-off-by: Fabrice Gasnier Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 3 +++ drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 5 +++++ drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 11 +++++++---- 4 files changed, 16 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 2763772ef1d4..7fa7ab0d90bc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -49,6 +49,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = bmax; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); while (len != 0) { @@ -63,6 +64,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = bmax; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); @@ -75,6 +77,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = len; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 4358a87fbc6e..cfc2f24ba08b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -56,6 +56,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = bmax; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_RING_MODE); @@ -73,6 +75,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = len; + desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_RING_MODE); @@ -84,6 +88,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) if (dma_mapping_error(priv->device, desc->des2)) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = nopaged_len; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, STMMAC_RING_MODE); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 7ae7c6430a3f..c497460c9061 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -45,6 +45,7 @@ struct stmmac_resources { struct stmmac_tx_info { dma_addr_t buf; bool map_as_page; + unsigned len; }; struct stmmac_priv { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eb555f0fe815..90a946f3f783 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1093,6 +1093,7 @@ static int 
init_dma_desc_rings(struct net_device *dev, gfp_t flags) p->des2 = 0; priv->tx_skbuff_dma[i].buf = 0; priv->tx_skbuff_dma[i].map_as_page = false; + priv->tx_skbuff_dma[i].len = 0; priv->tx_skbuff[i] = NULL; } @@ -1136,12 +1137,12 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) if (priv->tx_skbuff_dma[i].map_as_page) dma_unmap_page(priv->device, priv->tx_skbuff_dma[i].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[i].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, priv->tx_skbuff_dma[i].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[i].len, DMA_TO_DEVICE); } @@ -1347,12 +1348,12 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) if (priv->tx_skbuff_dma[entry].map_as_page) dma_unmap_page(priv->device, priv->tx_skbuff_dma[entry].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); else dma_unmap_single(priv->device, priv->tx_skbuff_dma[entry].buf, - priv->hw->desc->get_tx_len(p), + priv->tx_skbuff_dma[entry].len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry].buf = 0; priv->tx_skbuff_dma[entry].map_as_page = false; @@ -1986,6 +1987,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, desc->des2)) goto dma_map_err; priv->tx_skbuff_dma[entry].buf = desc->des2; + priv->tx_skbuff_dma[entry].len = nopaged_len; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum_insertion, priv->mode); } else { @@ -2014,6 +2016,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].map_as_page = true; + priv->tx_skbuff_dma[entry].len = len; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, priv->mode); wmb(); -- cgit v1.2.3 From 2a6d8e172639b49a81afc76140dd7ee7c9a24da1 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:32 +0100 Subject: stmmac: add last_segment field to dma data last_segment field is read twice from dma descriptors in stmmac_clean(). Add last_segment to dma data so that this flag is from priv structure in cache instead of memory. It avoids reading twice from memory for each loop in stmmac_clean(). Signed-off-by: Fabrice Gasnier Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 3 ++- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ++++- 3 files changed, 7 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 7fa7ab0d90bc..355eafb5d6c7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -150,8 +150,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + unsigned int entry = priv->dirty_tx; - if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) + if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. 
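
This patch and the previous one share a pattern: per-entry TX state (buffer length, last_segment flag) is mirrored in tx_skbuff_dma[], which lives in ordinary cached memory, so the cleanup loop never has to re-read the uncached coherent descriptor. The sketch below is a minimal model of that bookkeeping rather than driver code; the struct and function names are invented for the example, and DMA_TX_SIZE is taken to be the power-of-2 ring size from common.h so the (x + 1) & (size - 1) wrap applies.

#include <stdio.h>

#define DMA_TX_SIZE	512			/* power of 2, as in common.h */
#define NEXT_ENTRY(x)	(((x) + 1) & (DMA_TX_SIZE - 1))

/* Cut-down mirror of the per-entry state kept in cached memory. */
struct tx_info {
	unsigned int len;
	int last_segment;	/* written at xmit time, read at clean time */
};

static struct tx_info tx_info[DMA_TX_SIZE];

/* xmit side: remember that this entry closes a frame */
static void mark_last_segment(unsigned int entry, unsigned int len)
{
	tx_info[entry].len = len;
	tx_info[entry].last_segment = 1;
}

/* clean side: walk dirty..cur using only the cached flags */
static unsigned int clean(unsigned int dirty, unsigned int cur)
{
	unsigned int entry = dirty;

	while (entry != cur) {
		if (tx_info[entry].last_segment) {
			/* frame completed: TX status and stats would be
			 * handled here */
			tx_info[entry].last_segment = 0;
		}
		entry = NEXT_ENTRY(entry);
	}

	return entry;	/* new dirty index */
}

int main(void)
{
	mark_last_segment(2, 1500);	/* a 3-descriptor frame ends at entry 2 */
	printf("dirty advances to %u\n", clean(0, 3));
	return 0;
}
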
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index c497460c9061..043691852c26 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -46,6 +46,7 @@ struct stmmac_tx_info { dma_addr_t buf; bool map_as_page; unsigned len; + bool last_segment; }; struct stmmac_priv { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 90a946f3f783..feae0dec1f43 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1094,6 +1094,7 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags) priv->tx_skbuff_dma[i].buf = 0; priv->tx_skbuff_dma[i].map_as_page = false; priv->tx_skbuff_dma[i].len = 0; + priv->tx_skbuff_dma[i].last_segment = false; priv->tx_skbuff[i] = NULL; } @@ -1326,7 +1327,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) break; /* Verify tx error by looking at the last segment. */ - last = priv->hw->desc->get_tx_ls(p); + last = priv->tx_skbuff_dma[entry].last_segment; if (likely(last)) { int tx_error = priv->hw->desc->tx_status(&priv->dev->stats, @@ -1359,6 +1360,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->tx_skbuff_dma[entry].map_as_page = false; } priv->hw->mode->clean_desc3(priv, p); + priv->tx_skbuff_dma[entry].last_segment = false; if (likely(skb != NULL)) { pkts_compl++; @@ -2028,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Finalize the latest segment. */ priv->hw->desc->close_tx_desc(desc); + priv->tx_skbuff_dma[entry].last_segment = true; wmb(); /* According to the coalesce parameter the IC bit for the latest -- cgit v1.2.3 From 96951366ce8546662de56f58a3885b94326f9670 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:33 +0100 Subject: stmmac: add is_jumbo field to dma data Optimize tx_clean by avoiding a des3 read in stmmac_clean_desc3(). In ring mode, TX, des3 seems only used when xmit a jumbo frame. In case of normal descriptors, it may also be used for time stamping. Clean it in the above two case, without reading it. Signed-off-by: Fabrice Gasnier Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 3 ++- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 11 ++++++++++- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + 4 files changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 355eafb5d6c7..dacb6542c6dd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -152,7 +152,8 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; unsigned int entry = priv->dirty_tx; - if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc) + if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc && + priv->hwts_tx_en) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. 
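
The core of this change is the condition under which des3 has to be cleared at cleanup time: in ring mode des3 is only written for a jumbo split (des3 = des2 + BUF_SIZE_4KiB), and for normal descriptors the hardware may also overwrite it with a timestamp on the last segment. The helper below is a hypothetical restatement of that decision against cached flags only, so the uncached des3 word is never read; it is not the driver's own code.

#include <stdbool.h>
#include <stdio.h>

/* Cached per-entry flags, mirroring struct stmmac_tx_info. */
struct tx_flags {
	bool is_jumbo;		/* des3 addressed a second buffer */
	bool last_segment;	/* last descriptor of the frame */
};

/*
 * True when des3 must be cleared on cleanup, decided without reading
 * des3 itself: either the entry carried a jumbo split, or a normal
 * (non-extended) last descriptor may hold a hardware timestamp.
 */
static bool must_clean_des3(const struct tx_flags *f,
			    bool extend_desc, bool hwts_tx_en)
{
	return f->is_jumbo ||
	       (f->last_segment && !extend_desc && hwts_tx_en);
}

int main(void)
{
	struct tx_flags f = { .is_jumbo = false, .last_segment = true };

	/* normal descriptors with hardware TX timestamping enabled */
	printf("clean des3: %s\n",
	       must_clean_des3(&f, false, true) ? "yes" : "no");
	return 0;
}
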
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index cfc2f24ba08b..c6487746b235 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -57,6 +57,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = bmax; + priv->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, @@ -76,6 +77,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = len; + priv->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, @@ -89,6 +91,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = nopaged_len; + priv->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, STMMAC_RING_MODE); @@ -126,7 +129,13 @@ static void stmmac_init_desc3(struct dma_desc *p) static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { - if (unlikely(p->des3)) + struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; + unsigned int entry = priv->dirty_tx; + + /* des3 is only used for jumbo frames tx or time stamping */ + if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo || + (priv->tx_skbuff_dma[entry].last_segment && + !priv->extend_desc && priv->hwts_tx_en))) p->des3 = 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 043691852c26..0d01f3ea4cc8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -47,6 +47,7 @@ struct stmmac_tx_info { bool map_as_page; unsigned len; bool last_segment; + bool is_jumbo; }; struct stmmac_priv { diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index feae0dec1f43..0194a8f26f8c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1361,6 +1361,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) } priv->hw->mode->clean_desc3(priv, p); priv->tx_skbuff_dma[entry].last_segment = false; + priv->tx_skbuff_dma[entry].is_jumbo = false; if (likely(skb != NULL)) { pkts_compl++; -- cgit v1.2.3 From c1fa3212be5503d802a5c4c451dd4e673fdc603a Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Mon, 29 Feb 2016 14:27:34 +0100 Subject: stmmac: merge get_rx_owner into rx_status routine. The RDES0 register can be read several times while doing RX of a packet. This patch slightly improves RX path performance by reading rdes0 once for two operation: check rx owner, get rx status bits. Signed-off-by: Fabrice Gasnier Acked-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 10 +++++----- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 9 +++------ drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 9 +++------ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 9 +++++---- 4 files changed, 16 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 09291af19039..3ba268e93bc9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -238,10 +238,11 @@ struct stmmac_extra_stats { /* Rx IPC status */ enum rx_frame_status { - good_frame = 0, - discard_frame = 1, - csum_none = 2, - llc_snap = 4, + good_frame = 0x0, + discard_frame = 0x1, + csum_none = 0x2, + llc_snap = 0x4, + dma_own = 0x8, }; enum dma_irq_status { @@ -356,7 +357,6 @@ struct stmmac_desc_ops { /* Get the buffer size from the descriptor */ int (*get_tx_len) (struct dma_desc *p); /* Handle extra events on specific interrupts hw dependent */ - int (*get_rx_owner) (struct dma_desc *p); void (*set_rx_owner) (struct dma_desc *p); /* Get the receive frame size */ int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type); diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 716b80740b58..1a2fce988548 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -186,6 +186,9 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, unsigned int rdes0 = p->des0; int ret = good_frame; + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { x->rx_desc++; @@ -272,11 +275,6 @@ static int enh_desc_get_tx_owner(struct dma_desc *p) return (p->des0 & ETDES0_OWN) >> 31; } -static int enh_desc_get_rx_owner(struct dma_desc *p) -{ - return (p->des0 & RDES0_OWN) >> 31; -} - static void enh_desc_set_tx_owner(struct dma_desc *p) { p->des0 |= ETDES0_OWN; @@ -402,7 +400,6 @@ const struct stmmac_desc_ops enh_desc_ops = { .init_rx_desc = enh_desc_init_rx_desc, .init_tx_desc = enh_desc_init_tx_desc, .get_tx_owner = enh_desc_get_tx_owner, - .get_rx_owner = enh_desc_get_rx_owner, .release_tx_desc = enh_desc_release_tx_desc, .prepare_tx_desc = enh_desc_prepare_tx_desc, .clear_tx_ic = enh_desc_clear_tx_ic, diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 460c573dd3e1..5a91932ff639 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -82,6 +82,9 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, unsigned int rdes0 = p->des0; struct net_device_stats *stats = (struct net_device_stats *)data; + if (unlikely(rdes0 & RDES0_OWN)) + return dma_own; + if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { pr_warn("%s: Oversized frame spanned multiple buffers\n", __func__); @@ -155,11 +158,6 @@ static int ndesc_get_tx_owner(struct dma_desc *p) return (p->des0 & TDES0_OWN) >> 31; } -static int ndesc_get_rx_owner(struct dma_desc *p) -{ - return (p->des0 & RDES0_OWN) >> 31; -} - static void ndesc_set_tx_owner(struct dma_desc *p) { p->des0 |= TDES0_OWN; @@ -277,7 +275,6 @@ const struct stmmac_desc_ops ndesc_ops = { .init_rx_desc = ndesc_init_rx_desc, .init_tx_desc = ndesc_init_tx_desc, .get_tx_owner = ndesc_get_tx_owner, - .get_rx_owner = 
ndesc_get_rx_owner, .release_tx_desc = ndesc_release_tx_desc, .prepare_tx_desc = ndesc_prepare_tx_desc, .clear_tx_ic = ndesc_clear_tx_ic, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 0194a8f26f8c..796d7c69f902 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2205,7 +2205,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) else p = priv->dma_rx + entry; - if (priv->hw->desc->get_rx_owner(p)) + /* read the status of the incoming frame */ + status = priv->hw->desc->rx_status(&priv->dev->stats, + &priv->xstats, p); + /* check if managed by the DMA otherwise go ahead */ + if (unlikely(status & dma_own)) break; count++; @@ -2218,9 +2222,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) else prefetch(priv->dma_rx + next_entry); - /* read the status of the incoming frame */ - status = priv->hw->desc->rx_status(&priv->dev->stats, - &priv->xstats, p); if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) priv->hw->desc->rx_extended_status(&priv->dev->stats, &priv->xstats, -- cgit v1.2.3 From be434d5075d6be0cda996200b2a20035e1565215 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:35 +0100 Subject: stmmac: optimize tx desc management This patch is to optimize the way to manage the TDES inside the xmit function. When prepare the frame, some settings (e.g. OWN bit) can be merged. This has been reworked to improve the tx performances. Signed-off-by: Fabrice Gasnier Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 13 ++++++++----- drivers/net/ethernet/stmicro/stmmac/common.h | 5 ++--- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22 +++++++++++++++------- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 15 ++++++++------- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 7 +++---- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 16 ++++++---------- 6 files changed, 42 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index dacb6542c6dd..b3e669af3005 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -50,7 +50,9 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = bmax; - priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); + /* do not close the descriptor and do not set own bit */ + priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE, + 0, false); while (len != 0) { priv->tx_skbuff[entry] = NULL; @@ -66,8 +68,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = bmax; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, - STMMAC_CHAIN_MODE); - priv->hw->desc->set_tx_owner(desc); + STMMAC_CHAIN_MODE, 1, + false); len -= bmax; i++; } else { @@ -78,9 +80,10 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) return -1; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = len; + /* last descriptor can be set now */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_CHAIN_MODE); - priv->hw->desc->set_tx_owner(desc); 
+ STMMAC_CHAIN_MODE, 1, + true); len = 0; } } diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 3ba268e93bc9..885c0f9808b6 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -338,12 +338,11 @@ struct stmmac_desc_ops { /* Invoked by the xmit function to prepare the tx descriptor */ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, - int csum_flag, int mode); + bool csum_flag, int mode, bool tx_own, + bool ls_ic); /* Set/get the owner of the descriptor */ void (*set_tx_owner) (struct dma_desc *p); int (*get_tx_owner) (struct dma_desc *p); - /* Invoked by the xmit function to close the tx descriptor */ - void (*close_tx_desc) (struct dma_desc *p); /* Clean the tx descriptor as soon as the tx irq is received */ void (*release_tx_desc) (struct dma_desc *p, int mode); /* Clear interrupt on tx frame completion. When this bit is diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 1a2fce988548..1abd80ed09f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -302,7 +302,8 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) } static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag, int mode) + bool csum_flag, int mode, bool tx_own, + bool ls_ic) { unsigned int tdes0 = p->des0; @@ -316,6 +317,19 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, else tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + if (tx_own) + tdes0 |= ETDES0_OWN; + + if (is_fs & tx_own) + /* When the own bit, for the first frame, has to be set, all + * descriptors for the same frame has to be set before, to + * avoid race condition. 
+ */ + wmb(); + + if (ls_ic) + tdes0 |= ETDES0_LAST_SEGMENT | ETDES0_INTERRUPT; + p->des0 = tdes0; if (mode == STMMAC_CHAIN_MODE) @@ -329,11 +343,6 @@ static void enh_desc_clear_tx_ic(struct dma_desc *p) p->des0 &= ~ETDES0_INTERRUPT; } -static void enh_desc_close_tx_desc(struct dma_desc *p) -{ - p->des0 |= ETDES0_LAST_SEGMENT | ETDES0_INTERRUPT; -} - static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { unsigned int csum = 0; @@ -403,7 +412,6 @@ const struct stmmac_desc_ops enh_desc_ops = { .release_tx_desc = enh_desc_release_tx_desc, .prepare_tx_desc = enh_desc_prepare_tx_desc, .clear_tx_ic = enh_desc_clear_tx_ic, - .close_tx_desc = enh_desc_close_tx_desc, .get_tx_ls = enh_desc_get_tx_ls, .set_tx_owner = enh_desc_set_tx_owner, .set_rx_owner = enh_desc_set_rx_owner, diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 5a91932ff639..19cc12dd0f17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -185,7 +185,8 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode) } static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, - int csum_flag, int mode) + bool csum_flag, int mode, bool tx_own, + bool ls_ic) { unsigned int tdes1 = p->des1; @@ -199,6 +200,12 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, else tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT); + if (tx_own) + tdes1 |= TDES0_OWN; + + if (ls_ic) + tdes1 |= TDES1_LAST_SEGMENT | TDES1_INTERRUPT; + p->des1 = tdes1; if (mode == STMMAC_CHAIN_MODE) @@ -212,11 +219,6 @@ static void ndesc_clear_tx_ic(struct dma_desc *p) p->des1 &= ~TDES1_INTERRUPT; } -static void ndesc_close_tx_desc(struct dma_desc *p) -{ - p->des1 |= TDES1_LAST_SEGMENT | TDES1_INTERRUPT; -} - static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) { unsigned int csum = 0; @@ -278,7 +280,6 @@ const struct stmmac_desc_ops ndesc_ops = { .release_tx_desc = ndesc_release_tx_desc, .prepare_tx_desc = ndesc_prepare_tx_desc, .clear_tx_ic = ndesc_clear_tx_ic, - .close_tx_desc = ndesc_close_tx_desc, .get_tx_ls = ndesc_get_tx_ls, .set_tx_owner = ndesc_set_tx_owner, .set_rx_owner = ndesc_set_rx_owner, diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c6487746b235..11c71644f126 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -61,7 +61,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, - STMMAC_RING_MODE); + STMMAC_RING_MODE, 0, false); wmb(); priv->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -81,9 +81,8 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, - STMMAC_RING_MODE); + STMMAC_RING_MODE, 1, true); wmb(); - priv->hw->desc->set_tx_owner(desc); } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); @@ -94,7 +93,7 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) priv->tx_skbuff_dma[entry].is_jumbo = true; desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, - STMMAC_RING_MODE); + STMMAC_RING_MODE, 0, true); } priv->cur_tx = entry; diff --git 
a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 796d7c69f902..24c36084e3f5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1991,8 +1991,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) goto dma_map_err; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].len = nopaged_len; + /* do not set the own at this stage */ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, - csum_insertion, priv->mode); + csum_insertion, priv->mode, 0, + nfrags == 0); } else { desc = first; entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); @@ -2003,6 +2005,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; int len = skb_frag_size(frag); + bool last_segment = (i == (nfrags - 1)); priv->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -2021,19 +2024,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) priv->tx_skbuff_dma[entry].map_as_page = true; priv->tx_skbuff_dma[entry].len = len; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, - priv->mode); - wmb(); - priv->hw->desc->set_tx_owner(desc); - wmb(); + priv->mode, 1, last_segment); + priv->tx_skbuff_dma[entry].last_segment = last_segment; } priv->tx_skbuff[entry] = skb; - /* Finalize the latest segment. */ - priv->hw->desc->close_tx_desc(desc); - priv->tx_skbuff_dma[entry].last_segment = true; - - wmb(); /* According to the coalesce parameter the IC bit for the latest * segment could be reset and the timer re-started to invoke the * stmmac_tx function. This approach takes care about the fragments. -- cgit v1.2.3 From c363b6586cd424431e84d921267e101ec67207f5 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Mon, 29 Feb 2016 14:27:36 +0100 Subject: stmmac: optimize tx clean function This patch "inline" get_tx_owner and get_ls routines. It Results in a unique read to tdes0, instead of three, to check TX_OWN and LS bits, and other status bits. It helps improve driver TX path by removing two uncached read/writes inside TX clean loop for enhanced descriptors but not for normal ones because the des1 must be read in any case. Signed-off-by: Fabrice Gasnier Acked-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 8 +++++++ drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 12 ++++++++-- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 13 +++++++++-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 27 +++++++++++------------ 4 files changed, 42 insertions(+), 18 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 885c0f9808b6..7ccb147710fd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -245,6 +245,14 @@ enum rx_frame_status { dma_own = 0x8, }; +/* Tx status */ +enum tx_frame_status { + tx_done = 0x0, + tx_not_ls = 0x1, + tx_err = 0x2, + tx_dma_own = 0x4, +}; + enum dma_irq_status { tx_hard_error = 0x1, tx_hard_error_bump_tc = 0x2, diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 1abd80ed09f3..957610b72ace 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -31,7 +31,15 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, { struct net_device_stats *stats = (struct net_device_stats *)data; unsigned int tdes0 = p->des0; - int ret = 0; + int ret = tx_done; + + /* Get tx owner first */ + if (unlikely(tdes0 & ETDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes0 & ETDES0_LAST_SEGMENT))) + return tx_not_ls; if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) { if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT)) @@ -71,7 +79,7 @@ static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, dwmac_dma_flush_tx_fifo(ioaddr); } - ret = -1; + ret = tx_err; } if (unlikely(tdes0 & ETDES0_DEFERRED)) diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 19cc12dd0f17..122fb5ad234b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -31,7 +31,16 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, { struct net_device_stats *stats = (struct net_device_stats *)data; unsigned int tdes0 = p->des0; - int ret = 0; + unsigned int tdes1 = p->des1; + int ret = tx_done; + + /* Get tx owner first */ + if (unlikely(tdes0 & TDES0_OWN)) + return tx_dma_own; + + /* Verify tx error by looking at the last segment. */ + if (likely(!(tdes1 & TDES1_LAST_SEGMENT))) + return tx_not_ls; if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) { if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) { @@ -54,7 +63,7 @@ static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x, collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3; stats->collisions += collisions; } - ret = -1; + ret = tx_err; } if (tdes0 & TDES0_VLAN_FRAME) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 24c36084e3f5..d31179f597a8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1313,32 +1313,31 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->xstats.tx_clean++; while (entry != priv->cur_tx) { - int last; struct sk_buff *skb = priv->tx_skbuff[entry]; struct dma_desc *p; + int status; if (priv->extend_desc) p = (struct dma_desc *)(priv->dma_etx + entry); else p = priv->dma_tx + entry; - /* Check if the descriptor is owned by the DMA. 
*/ - if (priv->hw->desc->get_tx_owner(p)) - break; - - /* Verify tx error by looking at the last segment. */ - last = priv->tx_skbuff_dma[entry].last_segment; - if (likely(last)) { - int tx_error = - priv->hw->desc->tx_status(&priv->dev->stats, + status = priv->hw->desc->tx_status(&priv->dev->stats, &priv->xstats, p, priv->ioaddr); - if (likely(tx_error == 0)) { + /* Check if the descriptor is owned by the DMA */ + if (unlikely(status & tx_dma_own)) + break; + + /* Just consider the last segment and ...*/ + if (likely(!(status & tx_not_ls))) { + /* ... verify the status error condition */ + if (unlikely(status & tx_err)) { + priv->dev->stats.tx_errors++; + } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; - } else - priv->dev->stats.tx_errors++; - + } stmmac_get_tx_hwtstamp(priv, entry, skb); } if (netif_msg_tx_done(priv)) -- cgit v1.2.3 From fbc80823a93c57f2310dacfabfea6c76424552b6 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:37 +0100 Subject: stmmac: set dirty index out of the loop The dirty index can be updated out of the loop where all the tx resources are claimed. This will help on performances too. Also a useless debug printk has been removed from the main loop. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index d31179f597a8..2e4c10ab78bd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1340,9 +1340,6 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) } stmmac_get_tx_hwtstamp(priv, entry, skb); } - if (netif_msg_tx_done(priv)) - pr_debug("%s: curr %d, dirty %d\n", __func__, - priv->cur_tx, priv->dirty_tx); if (likely(priv->tx_skbuff_dma[entry].buf)) { if (priv->tx_skbuff_dma[entry].map_as_page) @@ -1372,8 +1369,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) priv->hw->desc->release_tx_desc(p, priv->mode); entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); - priv->dirty_tx = entry; } + priv->dirty_tx = entry; netdev_completed_queue(priv->dev, pkts_compl, bytes_compl); -- cgit v1.2.3 From 0e80bdc9a72df3b31a9fc2012102a6cc8d664e93 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:38 +0100 Subject: stmmac: first frame prep at the end of xmit routine This patch is to fill the first descriptor just before granting the DMA engine so at the end of the xmit. The patch takes care about the algorithm adopted to mitigate the interrupts, then it fixes the last segment in case of no fragments. Moreover, this new implementation does not pass any "ter" field when prepare the descriptors because this is not necessary. The patch also details the memory barrier in the xmit. As final results, this patch guarantees the same performances but fixing a case if small datagram are sent. In fact, this kind of test is impacted if no coalesce is done. Signed-off-by: Fabrice Gasnier Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/common.h | 6 +- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 25 ++--- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 24 ++--- drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2 - .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 2 +- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 114 ++++++++++++--------- 6 files changed, 95 insertions(+), 78 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7ccb147710fd..f96d257308b0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -100,7 +100,7 @@ struct stmmac_extra_stats { unsigned long napi_poll; unsigned long tx_normal_irq_n; unsigned long tx_clean; - unsigned long tx_reset_ic_bit; + unsigned long tx_set_ic_bit; unsigned long irq_receive_pmt_irq_n; /* MMC info */ unsigned long mmc_tx_irq_n; @@ -347,7 +347,7 @@ struct stmmac_desc_ops { /* Invoked by the xmit function to prepare the tx descriptor */ void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls_ic); + bool ls); /* Set/get the owner of the descriptor */ void (*set_tx_owner) (struct dma_desc *p); int (*get_tx_owner) (struct dma_desc *p); @@ -355,7 +355,7 @@ struct stmmac_desc_ops { void (*release_tx_desc) (struct dma_desc *p, int mode); /* Clear interrupt on tx frame completion. When this bit is * set an interrupt happens as soon as the frame is transmitted */ - void (*clear_tx_ic) (struct dma_desc *p); + void (*set_tx_ic)(struct dma_desc *p); /* Last tx segment reports the transmit status */ int (*get_tx_ls) (struct dma_desc *p); /* Return the transmit status looking at the TDES1 */ diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 957610b72ace..cfb018c7c5eb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c @@ -311,10 +311,15 @@ static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls_ic) + bool ls) { unsigned int tdes0 = p->des0; + if (mode == STMMAC_CHAIN_MODE) + enh_set_tx_desc_len_on_chain(p, len); + else + enh_set_tx_desc_len_on_ring(p, len); + if (is_fs) tdes0 |= ETDES0_FIRST_SEGMENT; else @@ -325,6 +330,10 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, else tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT); + if (ls) + tdes0 |= ETDES0_LAST_SEGMENT; + + /* Finally set the OWN bit. Later the DMA will start! 
*/ if (tx_own) tdes0 |= ETDES0_OWN; @@ -335,20 +344,12 @@ static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, */ wmb(); - if (ls_ic) - tdes0 |= ETDES0_LAST_SEGMENT | ETDES0_INTERRUPT; - p->des0 = tdes0; - - if (mode == STMMAC_CHAIN_MODE) - enh_set_tx_desc_len_on_chain(p, len); - else - enh_set_tx_desc_len_on_ring(p, len); } -static void enh_desc_clear_tx_ic(struct dma_desc *p) +static void enh_desc_set_tx_ic(struct dma_desc *p) { - p->des0 &= ~ETDES0_INTERRUPT; + p->des0 |= ETDES0_INTERRUPT; } static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) @@ -419,7 +420,7 @@ const struct stmmac_desc_ops enh_desc_ops = { .get_tx_owner = enh_desc_get_tx_owner, .release_tx_desc = enh_desc_release_tx_desc, .prepare_tx_desc = enh_desc_prepare_tx_desc, - .clear_tx_ic = enh_desc_clear_tx_ic, + .set_tx_ic = enh_desc_set_tx_ic, .get_tx_ls = enh_desc_get_tx_ls, .set_tx_owner = enh_desc_set_tx_owner, .set_rx_owner = enh_desc_set_rx_owner, diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c index 122fb5ad234b..e13228f115f0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c @@ -195,10 +195,15 @@ static void ndesc_release_tx_desc(struct dma_desc *p, int mode) static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, bool csum_flag, int mode, bool tx_own, - bool ls_ic) + bool ls) { unsigned int tdes1 = p->des1; + if (mode == STMMAC_CHAIN_MODE) + norm_set_tx_desc_len_on_chain(p, len); + else + norm_set_tx_desc_len_on_ring(p, len); + if (is_fs) tdes1 |= TDES1_FIRST_SEGMENT; else @@ -209,23 +214,18 @@ static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, else tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT); + if (ls) + tdes1 |= TDES1_LAST_SEGMENT; + if (tx_own) tdes1 |= TDES0_OWN; - if (ls_ic) - tdes1 |= TDES1_LAST_SEGMENT | TDES1_INTERRUPT; - p->des1 = tdes1; - - if (mode == STMMAC_CHAIN_MODE) - norm_set_tx_desc_len_on_chain(p, len); - else - norm_set_tx_desc_len_on_ring(p, len); } -static void ndesc_clear_tx_ic(struct dma_desc *p) +static void ndesc_set_tx_ic(struct dma_desc *p) { - p->des1 &= ~TDES1_INTERRUPT; + p->des1 |= TDES1_INTERRUPT; } static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) @@ -288,7 +288,7 @@ const struct stmmac_desc_ops ndesc_ops = { .get_tx_owner = ndesc_get_tx_owner, .release_tx_desc = ndesc_release_tx_desc, .prepare_tx_desc = ndesc_prepare_tx_desc, - .clear_tx_ic = ndesc_clear_tx_ic, + .set_tx_ic = ndesc_set_tx_ic, .get_tx_ls = ndesc_get_tx_ls, .set_tx_owner = ndesc_set_tx_owner, .set_rx_owner = ndesc_set_rx_owner, diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 11c71644f126..7723b5d2499a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -62,7 +62,6 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_RING_MODE, 0, false); - wmb(); priv->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); @@ -82,7 +81,6 @@ static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) desc->des3 = desc->des2 + BUF_SIZE_4KiB; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_RING_MODE, 1, true); - wmb(); } else { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 4c6486cc80fb..c803d4cfa044 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -97,7 +97,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { STMMAC_STAT(napi_poll), STMMAC_STAT(tx_normal_irq_n), STMMAC_STAT(tx_clean), - STMMAC_STAT(tx_reset_ic_bit), + STMMAC_STAT(tx_set_ic_bit), STMMAC_STAT(irq_receive_pmt_irq_n), /* MMC info */ STMMAC_STAT(mmc_tx_irq_n), diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2e4c10ab78bd..90b2612db03a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1942,12 +1942,12 @@ static int stmmac_release(struct net_device *dev) static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) { struct stmmac_priv *priv = netdev_priv(dev); - int entry; + unsigned int nopaged_len = skb_headlen(skb); int i, csum_insertion = 0, is_jumbo = 0; int nfrags = skb_shinfo(skb)->nr_frags; + unsigned int entry, first_entry; struct dma_desc *desc, *first; - unsigned int nopaged_len = skb_headlen(skb); - unsigned int enh_desc = priv->plat->enh_desc; + unsigned int enh_desc; spin_lock(&priv->tx_lock); @@ -1965,34 +1965,25 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) stmmac_disable_eee_mode(priv); entry = priv->cur_tx; - + first_entry = entry; csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); - if (priv->extend_desc) + if (likely(priv->extend_desc)) desc = (struct dma_desc *)(priv->dma_etx + entry); else desc = priv->dma_tx + entry; first = desc; + priv->tx_skbuff[first_entry] = skb; + + enh_desc = priv->plat->enh_desc; /* To program the descriptors according to the size of the frame */ if (enh_desc) is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); - if (likely(!is_jumbo)) { - desc->des2 = dma_map_single(priv->device, skb->data, - nopaged_len, DMA_TO_DEVICE); - if (dma_mapping_error(priv->device, desc->des2)) - goto dma_map_err; - priv->tx_skbuff_dma[entry].buf = desc->des2; - priv->tx_skbuff_dma[entry].len = nopaged_len; - /* do not set the own at this stage */ - priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, - csum_insertion, priv->mode, 0, - nfrags == 0); - } else { - desc = first; + if (unlikely(is_jumbo)) { entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); if (unlikely(entry < 0)) goto dma_map_err; @@ -2003,10 +1994,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int len = skb_frag_size(frag); bool last_segment = (i == (nfrags - 1)); - priv->tx_skbuff[entry] = NULL; entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); - if (priv->extend_desc) + if (likely(priv->extend_desc)) desc = (struct dma_desc *)(priv->dma_etx + entry); else desc = priv->dma_tx + entry; @@ -2016,41 +2006,25 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, desc->des2)) goto dma_map_err; /* should reuse desc w/o issues */ + priv->tx_skbuff[entry] = NULL; priv->tx_skbuff_dma[entry].buf = desc->des2; priv->tx_skbuff_dma[entry].map_as_page = true; priv->tx_skbuff_dma[entry].len = len; + priv->tx_skbuff_dma[entry].last_segment = last_segment; + + /* Prepare the descriptor and set the own bit too */ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, priv->mode, 1, last_segment); - 
priv->tx_skbuff_dma[entry].last_segment = last_segment; } - priv->tx_skbuff[entry] = skb; - - /* According to the coalesce parameter the IC bit for the latest - * segment could be reset and the timer re-started to invoke the - * stmmac_tx function. This approach takes care about the fragments. - */ - priv->tx_count_frames += nfrags + 1; - if (priv->tx_coal_frames > priv->tx_count_frames) { - priv->hw->desc->clear_tx_ic(desc); - priv->xstats.tx_reset_ic_bit++; - mod_timer(&priv->txtimer, - STMMAC_COAL_TIMER(priv->tx_coal_timer)); - } else - priv->tx_count_frames = 0; - - /* To avoid raise condition */ - priv->hw->desc->set_tx_owner(first); - wmb(); - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); priv->cur_tx = entry; if (netif_msg_pktdata(priv)) { - pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d", - __func__, (priv->cur_tx % DMA_TX_SIZE), - (priv->dirty_tx % DMA_TX_SIZE), entry, first, nfrags); + pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d", + __func__, priv->cur_tx, priv->dirty_tx, first_entry, + entry, first, nfrags); if (priv->extend_desc) stmmac_display_ring((void *)priv->dma_etx, @@ -2062,6 +2036,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) pr_debug(">>> frame to be transmitted: "); print_pkt(skb->data, skb->len); } + if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) { if (netif_msg_hw(priv)) pr_debug("%s: stop transmitted packets\n", __func__); @@ -2070,16 +2045,59 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_bytes += skb->len; - if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && - priv->hwts_tx_en)) { - /* declare that device is doing timestamping */ - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - priv->hw->desc->enable_tx_timestamp(first); + /* According to the coalesce parameter the IC bit for the latest + * segment is reset and the timer re-started to clean the tx status. + * This approach takes care about the fragments: desc is the first + * element in case of no SG. + */ + priv->tx_count_frames += nfrags + 1; + if (likely(priv->tx_coal_frames > priv->tx_count_frames)) { + mod_timer(&priv->txtimer, + STMMAC_COAL_TIMER(priv->tx_coal_timer)); + } else { + priv->tx_count_frames = 0; + priv->hw->desc->set_tx_ic(desc); + priv->xstats.tx_set_ic_bit++; } if (!priv->hwts_tx_en) skb_tx_timestamp(skb); + /* Ready to fill the first descriptor and set the OWN bit w/o any + * problems because all the descriptors are actually ready to be + * passed to the DMA engine. + */ + if (likely(!is_jumbo)) { + bool last_segment = (nfrags == 0); + + first->des2 = dma_map_single(priv->device, skb->data, + nopaged_len, DMA_TO_DEVICE); + if (dma_mapping_error(priv->device, first->des2)) + goto dma_map_err; + + priv->tx_skbuff_dma[first_entry].buf = first->des2; + priv->tx_skbuff_dma[first_entry].len = nopaged_len; + priv->tx_skbuff_dma[first_entry].last_segment = last_segment; + + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + priv->hwts_tx_en)) { + /* declare that device is doing timestamping */ + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->hw->desc->enable_tx_timestamp(first); + } + + /* Prepare the first descriptor setting the OWN bit too */ + priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len, + csum_insertion, priv->mode, 1, + last_segment); + + /* The own bit must be the latest setting done when prepare the + * descriptor and then barrier is needed to make sure that + * all is coherent before granting the DMA engine. 
+ */ + smp_wmb(); + } + netdev_sent_queue(dev, skb->len); priv->hw->dma->enable_dma_transmission(priv->ioaddr); -- cgit v1.2.3 From 8e99fc5f887a072ee023eb9a3240eba85e768734 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:39 +0100 Subject: stmmac: do not poll phy handler when attach a switch This patch avoids to call the stmmac_adjust_link when the driver is connected to a switch by using the FIXED_PHY support. Prior this patch the phydev->irq was set as PHY_POLL so periodically the phy handler was invoked spending useless time because the link cannot actually change. Note that the stmmac_adjust_link will be called just one time and this guarantees that the ST glue logic will be setup according to the mode and speed fixed. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 90b2612db03a..eab7ac0f5bc1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -866,6 +866,11 @@ static int stmmac_init_phy(struct net_device *dev) phy_disconnect(phydev); return -ENODEV; } + + /* If attached to a switch, there is no reason to poll phy handler */ + if (!strcmp(priv->plat->phy_bus_name, "fixed")) + phydev->irq = PHY_IGNORE_INTERRUPT; + pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); -- cgit v1.2.3 From 8ecd80a5f6a7e31824c0d49e10c3cf6825c6c291 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Mon, 29 Feb 2016 14:27:40 +0100 Subject: stmmac: fix phy init when attached to a phy phy_bus_name can be NULL when "fixed-link" property isn't used. Then, since "stmmac: do not poll phy handler when attach a switch", phy_bus_name ptr needs to be checked before strcmp is called. Signed-off-by: Fabrice Gasnier Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eab7ac0f5bc1..3cc135559a1d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -868,8 +868,9 @@ static int stmmac_init_phy(struct net_device *dev) } /* If attached to a switch, there is no reason to poll phy handler */ - if (!strcmp(priv->plat->phy_bus_name, "fixed")) - phydev->irq = PHY_IGNORE_INTERRUPT; + if (priv->plat->phy_bus_name) + if (!strcmp(priv->plat->phy_bus_name, "fixed")) + phydev->irq = PHY_IGNORE_INTERRUPT; pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); -- cgit v1.2.3 From 22ad38381547950c64ab8590ebf04a5d3693cbb7 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:41 +0100 Subject: stmmac: do not perform zero-copy for rx frames This patch is to allow this driver to copy tiny frames during the reception process. This is giving more stability while stressing the driver on STi embedded systems. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + .../net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 39 ++++++++++++++ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 61 ++++++++++++++++------ 3 files changed, 86 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 0d01f3ea4cc8..221f5cda70f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -74,6 +74,7 @@ struct stmmac_priv { unsigned int cur_rx; unsigned int dirty_rx; unsigned int dma_buf_sz; + unsigned int rx_copybreak; u32 rx_riwt; int hwts_rx_en; dma_addr_t *rx_skbuff_dma; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index c803d4cfa044..3c7928edfebb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -781,6 +781,43 @@ static int stmmac_get_ts_info(struct net_device *dev, return ethtool_op_get_ts_info(dev, info); } +static int stmmac_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int stmmac_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct stmmac_priv *priv = netdev_priv(dev); + int ret = 0; + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + priv->rx_copybreak = *(u32 *)data; + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + static const struct ethtool_ops stmmac_ethtool_ops = { .begin = stmmac_check_if_running, .get_drvinfo = stmmac_ethtool_getdrvinfo, @@ -803,6 +840,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = { .get_ts_info = stmmac_get_ts_info, .get_coalesce = stmmac_get_coalesce, .set_coalesce = stmmac_set_coalesce, + .get_tunable = stmmac_get_tunable, + .set_tunable = stmmac_set_tunable, }; void stmmac_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 3cc135559a1d..2ffe8dd9ce99 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -91,6 +91,8 @@ static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); +#define STMMAC_RX_COPYBREAK 256 + static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_TIMER); @@ -1808,6 +1810,7 @@ static int stmmac_open(struct net_device *dev) priv->xstats.threshold = tc; priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); + priv->rx_copybreak = STMMAC_RX_COPYBREAK; ret = alloc_dma_desc_resources(priv); if (ret < 0) { @@ -2159,8 +2162,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); - - if (unlikely(skb == NULL)) + if (unlikely(!skb)) break; priv->rx_skbuff[entry] = skb; @@ -2282,23 +2284,52 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) pr_debug("\tframe size %d, COE: %d\n", frame_len, status); } - skb = priv->rx_skbuff[entry]; - if (unlikely(!skb)) { - pr_err("%s: Inconsistent Rx descriptor 
chain\n", - priv->dev->name); - priv->dev->stats.rx_dropped++; - break; + + if (unlikely(frame_len < priv->rx_copybreak)) { + skb = netdev_alloc_skb_ip_align(priv->dev, + frame_len); + if (unlikely(!skb)) { + if (net_ratelimit()) + dev_warn(priv->device, + "packet dropped\n"); + priv->dev->stats.rx_dropped++; + break; + } + + dma_sync_single_for_cpu(priv->device, + priv->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + skb_copy_to_linear_data(skb, + priv-> + rx_skbuff[entry]->data, + frame_len); + + skb_put(skb, frame_len); + dma_sync_single_for_device(priv->device, + priv->rx_skbuff_dma + [entry], frame_len, + DMA_FROM_DEVICE); + } else { + skb = priv->rx_skbuff[entry]; + if (unlikely(!skb)) { + pr_err("%s: Inconsistent Rx chain\n", + priv->dev->name); + priv->dev->stats.rx_dropped++; + break; + } + prefetch(skb->data - NET_IP_ALIGN); + priv->rx_skbuff[entry] = NULL; + + skb_put(skb, frame_len); + dma_unmap_single(priv->device, + priv->rx_skbuff_dma[entry], + priv->dma_buf_sz, + DMA_FROM_DEVICE); } - prefetch(skb->data - NET_IP_ALIGN); - priv->rx_skbuff[entry] = NULL; stmmac_get_rx_hwtstamp(priv, entry, skb); - skb_put(skb, frame_len); - dma_unmap_single(priv->device, - priv->rx_skbuff_dma[entry], - priv->dma_buf_sz, DMA_FROM_DEVICE); - if (netif_msg_pktdata(priv)) { pr_debug("frame received (%dbytes)", frame_len); print_pkt(skb->data, frame_len); -- cgit v1.2.3 From 120e87f91e2980e17356f96055023681f1a4d45b Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:42 +0100 Subject: stmmac: tune rx copy via threshold. There is a threshold now used to also limit the skb allocation when use zero-copy. This is to avoid that there are incoherence in the ring due to a failure on skb allocation under very aggressive testing and under low memory conditions. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26 +++++++++++++++++++++-- 2 files changed, 25 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index 221f5cda70f3..d6c244f70171 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -75,6 +75,7 @@ struct stmmac_priv { unsigned int dirty_rx; unsigned int dma_buf_sz; unsigned int rx_copybreak; + unsigned int rx_zeroc_thresh; u32 rx_riwt; int hwts_rx_en; dma_addr_t *rx_skbuff_dma; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2ffe8dd9ce99..4c5ce9848ca9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -72,6 +72,7 @@ module_param(phyaddr, int, S_IRUGO); MODULE_PARM_DESC(phyaddr, "Physical device address"); #define STMMAC_TX_THRESH (DMA_TX_SIZE / 4) +#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4) static int flow_ctrl = FLOW_OFF; module_param(flow_ctrl, int, S_IRUGO | S_IWUSR); @@ -2138,6 +2139,14 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) } +static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv) +{ + if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH) + return 0; + + return 1; +} + /** * stmmac_rx_refill - refill used skb preallocated buffers * @priv: driver private structure @@ -2162,8 +2171,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) struct sk_buff *skb; skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); - if (unlikely(!skb)) + if (unlikely(!skb)) { + /* so for a while no zero-copy! */ + priv->rx_zeroc_thresh = STMMAC_RX_THRESH; + if (unlikely(net_ratelimit())) + dev_err(priv->device, + "fail to alloc skb entry %d\n", + entry); break; + } priv->rx_skbuff[entry] = skb; priv->rx_skbuff_dma[entry] = @@ -2179,9 +2195,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) priv->hw->mode->refill_desc3(priv, p); + if (priv->rx_zeroc_thresh > 0) + priv->rx_zeroc_thresh--; + if (netif_msg_rx_status(priv)) pr_debug("\trefill entry #%d\n", entry); } + wmb(); priv->hw->desc->set_rx_owner(p); wmb(); @@ -2285,7 +2305,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) frame_len, status); } - if (unlikely(frame_len < priv->rx_copybreak)) { + if (unlikely((frame_len < priv->rx_copybreak) || + stmmac_rx_threshold_count(priv))) { skb = netdev_alloc_skb_ip_align(priv->dev, frame_len); if (unlikely(!skb)) { @@ -2320,6 +2341,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) } prefetch(skb->data - NET_IP_ALIGN); priv->rx_skbuff[entry] = NULL; + priv->rx_zeroc_thresh++; skb_put(skb, frame_len); dma_unmap_single(priv->device, -- cgit v1.2.3 From 3796e44ddc1f08230ee920c70ffb5c2e4d1670f6 Mon Sep 17 00:00:00 2001 From: Giuseppe Cavallaro Date: Mon, 29 Feb 2016 14:27:43 +0100 Subject: stmmac: update version to Oct_2015 This patch just updates the driver to the version fully tested on STi platforms. This version is Oct_2015. Signed-off-by: Giuseppe Cavallaro Signed-off-by: Alexandre TORGUE Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index d6c244f70171..8bbab97895fe 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -24,7 +24,7 @@ #define __STMMAC_H__ #define STMMAC_RESOURCE_NAME "stmmaceth" -#define DRV_MODULE_VERSION "March_2013" +#define DRV_MODULE_VERSION "Oct_2015" #include #include -- cgit v1.2.3 From da08e4259fbfd769d1e825a685d44132c8576450 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 1 Mar 2016 17:19:32 +0530 Subject: cxgb4/cxgb4vf: Use fl capacity to check if fl needs to be replenished Use freelist capacity instead of freelist size while checking, if freelist needs to be refilled Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2 +- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index b4eb4680a27c..22d972030927 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2226,7 +2226,7 @@ static int process_responses(struct sge_rspq *q, int budget) budget_left--; } - if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16) + if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) __refill_fl(q->adap, &rxq->fl); return budget - budget_left; } diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 6528231d8a59..9772aad22bca 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -1864,7 +1864,7 @@ static int process_responses(struct sge_rspq *rspq, int budget) * for new buffer pointers, refill the Free List. */ if (rspq->offset >= 0 && - rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) + fl_cap(&rxq->fl) - rxq->fl.avail >= 2*FL_PER_EQ_UNIT) __refill_fl(rspq->adapter, &rxq->fl); return budget - budget_left; } -- cgit v1.2.3 From edadad80d65bf9c7aa9f2605dbd2eef94ccd47c0 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 1 Mar 2016 17:19:33 +0530 Subject: cxgb4/cxgb4vf: For T6 adapter, set FBMIN to 64 bytes T4 and T5 hardware will not coalesce Free List PCI-E Fetch Requests if the Host Driver provides more Free List Pointers than the Fetch Burst Minimum value. So if we set FBMIN to 64 bytes and the Host Driver supplies 128 bytes of Free List Pointer data, the hardware will issue two 64-byte PCI-E Fetch Requests rather than a single coallesced 128-byte Fetch Request. T6 fixes this. So, for T4/T5 we set the FBMIN value to 128 Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4/sge.c | 12 +++++++++++- drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 1 + drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 13 ++++++++++++- 3 files changed, 24 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 22d972030927..deca4a2956cc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2611,8 +2611,18 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) | FW_IQ_CMD_FL0CONGCIF_F | FW_IQ_CMD_FL0CONGEN_F); + /* In T6, for egress queue type FL there is internal overhead + * of 16B for header going into FLM module. Hence the maximum + * allowed burst size is 448 bytes. For T4/T5, the hardware + * doesn't coalesce fetch requests if more than 64 bytes of + * Free List pointers are provided, so we use a 128-byte Fetch + * Burst Minimum there (T6 implements coalescing so we can use + * the smaller 64-byte value there). + */ c.fl0dcaen_to_fl0cidxfthresh = - htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) | + htons(FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? + FETCHBURSTMIN_128B_X : + FETCHBURSTMIN_64B_X) | FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index a5231fa771db..36cf3073ca37 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -65,6 +65,7 @@ #define TIMERREG_COUNTER0_X 0 #define FETCHBURSTMIN_64B_X 2 +#define FETCHBURSTMIN_128B_X 3 #define FETCHBURSTMAX_256B_X 2 #define FETCHBURSTMAX_512B_X 3 diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 9772aad22bca..ba6a4e3471f0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2300,9 +2300,20 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) | FW_IQ_CMD_FL0PACKEN_F | FW_IQ_CMD_FL0PADEN_F); + + /* In T6, for egress queue type FL there is internal overhead + * of 16B for header going into FLM module. Hence the maximum + * allowed burst size is 448 bytes. For T4/T5, the hardware + * doesn't coalesce fetch requests if more than 64 bytes of + * Free List pointers are provided, so we use a 128-byte Fetch + * Burst Minimum there (T6 implements coalescing so we can use + * the smaller 64-byte value there). + */ cmd.fl0dcaen_to_fl0cidxfthresh = cpu_to_be16( - FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) | + FW_IQ_CMD_FL0FBMIN_V(chip <= CHELSIO_T5 ? + FETCHBURSTMIN_128B_X : + FETCHBURSTMIN_64B_X) | FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ? FETCHBURSTMAX_512B_X : FETCHBURSTMAX_256B_X)); -- cgit v1.2.3 From cb440364c72cbbf3dde084cf65b997b40ecb8efd Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 1 Mar 2016 17:19:34 +0530 Subject: cxgb4vf: Make sge init code more readable Adds a new function t4vf_fl_pkt_align() and use the same in SGE initialization code to find out freelist packet alignment Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 40 ++-------------- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 1 + drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 55 ++++++++++++++++++++++ 3 files changed, 59 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index ba6a4e3471f0..1ccd282949a5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2618,7 +2618,6 @@ int t4vf_sge_init(struct adapter *adapter) u32 fl0 = sge_params->sge_fl_buffer_size[0]; u32 fl1 = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; - unsigned int ingpadboundary, ingpackboundary, ingpad_shift; /* * Start by vetting the basic SGE parameters which have been set up by @@ -2630,7 +2629,8 @@ int t4vf_sge_init(struct adapter *adapter) fl0, fl1); return -EINVAL; } - if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) { + if ((sge_params->sge_control & RXPKTCPLMODE_F) != + RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) { dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n"); return -EINVAL; } @@ -2643,41 +2643,7 @@ int t4vf_sge_init(struct adapter *adapter) s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); - - /* T4 uses a single control field to specify both the PCIe Padding and - * Packing Boundary. T5 introduced the ability to specify these - * separately. The actual Ingress Packet Data alignment boundary - * within Packed Buffer Mode is the maximum of these two - * specifications. (Note that it makes no real practical sense to - * have the Pading Boudary be larger than the Packing Boundary but you - * could set the chip up that way and, in fact, legacy T4 code would - * end doing this because it would initialize the Padding Boundary and - * leave the Packing Boundary initialized to 0 (16 bytes).) - * Padding Boundary values in T6 starts from 8B, - * where as it is 32B for T4 and T5. - */ - if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) - ingpad_shift = INGPADBOUNDARY_SHIFT_X; - else - ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; - - ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) + - ingpad_shift); - if (is_t4(adapter->params.chip)) { - s->fl_align = ingpadboundary; - } else { - /* T5 has a different interpretation of one of the PCIe Packing - * Boundary values. - */ - ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2); - if (ingpackboundary == INGPACKBOUNDARY_16B_X) - ingpackboundary = 16; - else - ingpackboundary = 1 << (ingpackboundary + - INGPACKBOUNDARY_SHIFT_X); - - s->fl_align = max(ingpadboundary, ingpackboundary); - } + s->fl_align = t4vf_fl_pkt_align(adapter); /* A FL with <= fl_starve_thres buffers is starving and a periodic * timer will attempt to refill it. 
This needs to be larger than the diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h index 6ce302fe1a61..9b40a85cc1e4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h @@ -309,6 +309,7 @@ int t4vf_port_init(struct adapter *, int); int t4vf_fw_reset(struct adapter *); int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *); +int t4vf_fl_pkt_align(struct adapter *adapter); enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS }; int t4vf_bar2_sge_qregs(struct adapter *adapter, unsigned int qid, diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 54220117dcba..fed83d88fc4e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -417,6 +417,61 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams, return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL); } +/** + * t4vf_fl_pkt_align - return the fl packet alignment + * @adapter: the adapter + * + * T4 has a single field to specify the packing and padding boundary. + * T5 onwards has separate fields for this and hence the alignment for + * next packet offset is maximum of these two. And T6 changes the + * Ingress Padding Boundary Shift, so it's all a mess and it's best + * if we put this in low-level Common Code ... + * + */ +int t4vf_fl_pkt_align(struct adapter *adapter) +{ + u32 sge_control, sge_control2; + unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift; + + sge_control = adapter->params.sge.sge_control; + + /* T4 uses a single control field to specify both the PCIe Padding and + * Packing Boundary. T5 introduced the ability to specify these + * separately. The actual Ingress Packet Data alignment boundary + * within Packed Buffer Mode is the maximum of these two + * specifications. (Note that it makes no real practical sense to + * have the Pading Boudary be larger than the Packing Boundary but you + * could set the chip up that way and, in fact, legacy T4 code would + * end doing this because it would initialize the Padding Boundary and + * leave the Packing Boundary initialized to 0 (16 bytes).) + * Padding Boundary values in T6 starts from 8B, + * where as it is 32B for T4 and T5. + */ + if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) + ingpad_shift = INGPADBOUNDARY_SHIFT_X; + else + ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X; + + ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift); + + fl_align = ingpadboundary; + if (!is_t4(adapter->params.chip)) { + /* T5 has a different interpretation of one of the PCIe Packing + * Boundary values. + */ + sge_control2 = adapter->params.sge.sge_control2; + ingpackboundary = INGPACKBOUNDARY_G(sge_control2); + if (ingpackboundary == INGPACKBOUNDARY_16B_X) + ingpackboundary = 16; + else + ingpackboundary = 1 << (ingpackboundary + + INGPACKBOUNDARY_SHIFT_X); + + fl_align = max(ingpadboundary, ingpackboundary); + } + return fl_align; +} + /** * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information * @adapter: the adapter -- cgit v1.2.3 From 5d7b80522b3f1c60147b8c0405ca57e015be26e5 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 1 Mar 2016 17:19:35 +0530 Subject: cxgb4vf: Remove redundant adapter ready check during probe Function t4vf_wait_dev_ready() is already called in t4vf_prep_adapter(), no need to call it again in adap_init0(). 
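[Editorial note] The redundancy removed here is purely one of call ordering. Below is a minimal, compilable user-space sketch of that ordering; the function bodies are invented stand-ins for illustration only, not the real cxgb4vf code — only the names t4vf_wait_dev_ready(), t4vf_prep_adapter() and adap_init0() come from the patch.

```c
#include <stdio.h>

/* Stand-ins for the real driver entry points; bodies are invented
 * purely to illustrate the probe-time call order described above.
 */
static int t4vf_wait_dev_ready(void)
{
	printf("waiting for device to become ready\n");
	return 0;		/* assume the device answers */
}

static int t4vf_prep_adapter(void)
{
	/* The prepare step already performs the readiness wait. */
	return t4vf_wait_dev_ready();
}

static int adap_init0(void)
{
	/* After this patch there is no second t4vf_wait_dev_ready()
	 * here: probe only reaches adap_init0() once
	 * t4vf_prep_adapter() has succeeded, so the device is
	 * already known to be ready.
	 */
	printf("continuing adapter init\n");
	return 0;
}

int main(void)
{
	if (t4vf_prep_adapter())
		return 1;
	return adap_init0();
}
```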
Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 8337514ababb..5d989e4c42dc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2236,16 +2236,6 @@ static int adap_init0(struct adapter *adapter) int err; u32 param, val = 0; - /* - * Wait for the device to become ready before proceeding ... - */ - err = t4vf_wait_dev_ready(adapter); - if (err) { - dev_err(adapter->pdev_dev, "device didn't become ready:" - " err=%d\n", err); - return err; - } - /* * Some environments do not properly handle PCIE FLRs -- e.g. in Linux * 2.6.31 and later we can't call pci_reset_function() in order to -- cgit v1.2.3 From f1ea2fe05dcc0104b445c127bf4bcf2b1b498b76 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 1 Mar 2016 17:19:36 +0530 Subject: cxgb4vf: Remove dead functions collect_netdev_[um]c_list_addrs Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 46 ---------------------- 1 file changed, 46 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5d989e4c42dc..91857b81009e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -862,52 +862,6 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev) return ns; } -/* - * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting - * at a specified offset within the list, into an array of addrss pointers and - * return the number collected. - */ -static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int offset, - unsigned int maxaddrs) -{ - unsigned int index = 0; - unsigned int naddr = 0; - const struct netdev_hw_addr *ha; - - for_each_dev_addr(dev, ha) - if (index++ >= offset) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } - return naddr; -} - -/* - * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting - * at a specified offset within the list, into an array of addrss pointers and - * return the number collected. - */ -static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev, - const u8 **addr, - unsigned int offset, - unsigned int maxaddrs) -{ - unsigned int index = 0; - unsigned int naddr = 0; - const struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) - if (index++ >= offset) { - addr[naddr++] = ha->addr; - if (naddr >= maxaddrs) - break; - } - return naddr; -} - static inline int cxgb4vf_set_addr_hash(struct port_info *pi) { struct adapter *adapter = pi->adapter; -- cgit v1.2.3 From 998fc1d0803bff10d6d8a6f6ed67689f327d9315 Mon Sep 17 00:00:00 2001 From: Eric Engestrom Date: Mon, 29 Feb 2016 16:40:23 +0000 Subject: ethernet/atl1c: remove left over dead code Left over from c24588afc536a35c924d014f13b669b20ccf8553 ("atl1c: using fixed TXQ configuration for l2cb and l1c") Signed-off-by: Eric Engestrom Signed-off-by: David S. 
Miller --- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 8b5988e210d5..d0084d4d1a9b 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -65,10 +65,6 @@ static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); static int atl1c_configure(struct atl1c_adapter *adapter); static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); -static const u16 atl1c_pay_load_size[] = { - 128, 256, 512, 1024, 2048, 4096, -}; - static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; -- cgit v1.2.3 From dd215430dc9132fc0505cb4d5aa6dc6243ce8f9c Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Thu, 3 Mar 2016 11:26:18 +0200 Subject: NFC: pn544: Drop two useless checks in ACPI probe path When pn544_hci_i2c_acpi_request_resources() gets called we already know that the entries in ->acpi_match_table have matched ACPI ID of the device. In addition I2C client pointer cannot be NULL in any case (otherwise I2C core would not call ->probe() for the driver in the first place). Drop the two useless checks from the driver. Signed-off-by: Mika Westerberg Signed-off-by: Samuel Ortiz --- drivers/nfc/pn544/i2c.c | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 76c318444304..45d0e667d7ae 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -877,20 +877,8 @@ exit_state_wait_secure_write_answer: static int pn544_hci_i2c_acpi_request_resources(struct i2c_client *client) { struct pn544_i2c_phy *phy = i2c_get_clientdata(client); - const struct acpi_device_id *id; struct gpio_desc *gpiod_en, *gpiod_fw; - struct device *dev; - - if (!client) - return -EINVAL; - - dev = &client->dev; - - /* Match the struct device against a given list of ACPI IDs */ - id = acpi_match_device(dev->driver->acpi_match_table, dev); - - if (!id) - return -ENODEV; + struct device *dev = &client->dev; /* Get EN GPIO from ACPI */ gpiod_en = devm_gpiod_get_index(dev, PN544_GPIO_NAME_EN, 1, -- cgit v1.2.3 From e0b6ce00b1ec0434e96629ab64f3e16dcb4f33b7 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: Wed, 3 Feb 2016 21:07:43 +0530 Subject: ath10k: fix pointless update of peer stats list We periodically receive f/w stats event for updating the rx duration and there is no reason to keep on appending the f/w stats peer list, as this gets completely cleaned up when the user polls for f/w stats {pdev, vdev, peer stats}. 
Only don't print the warning message in the case PEER_STATS service is enabled Fixes: 856e7c3 ("ath10k: add debugfs support for Per STA total rx duration") Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 848a0ddca722..a633152787e0 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -351,8 +351,10 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb) if (peer_stats_svc) ath10k_sta_update_rx_duration(ar, &stats.peers); - if (ar->debug.fw_stats_done && !peer_stats_svc) { - ath10k_warn(ar, "received unsolicited stats update event\n"); + if (ar->debug.fw_stats_done) { + if (!peer_stats_svc) + ath10k_warn(ar, "received unsolicited stats update event\n"); + goto free; } -- cgit v1.2.3 From 53a5c9bc53ce51f65699a43c67ab167436d28083 Mon Sep 17 00:00:00 2001 From: Ashok Raj Nagarajan Date: Fri, 5 Feb 2016 21:12:48 +0530 Subject: ath10k: fix pktlog in QCA99X0 Currently, we are providing wrong payload data of pktlog to trace points. Data we receive from FW through copy engine 8 contains pktlog data alone. We don't need to parse anything in driver before handing it to trace points. Fixes: afb0bf7f530b ("ath10k: add support for pktlog in QCA99X0") Signed-off-by: Ashok Raj Nagarajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index cc957a625605..bf778f291133 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2145,11 +2145,7 @@ EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler); void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar, struct sk_buff *skb) { - struct ath10k_pktlog_10_4_hdr *hdr = - (struct ath10k_pktlog_10_4_hdr *)skb->data; - - trace_ath10k_htt_pktlog(ar, hdr->payload, - sizeof(*hdr) + __le16_to_cpu(hdr->size)); + trace_ath10k_htt_pktlog(ar, skb->data, skb->len); dev_kfree_skb_any(skb); } EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler); -- cgit v1.2.3 From 18353749170033405790ed62a3ae2aa2d3d8a82a Mon Sep 17 00:00:00 2001 From: Ashok Raj Nagarajan Date: Fri, 5 Feb 2016 21:12:49 +0530 Subject: ath10k: add hw_rev to trace events to support pktlog pktlog data is different between firmware variants (eg. 10.2 vs 10.4). To have a unified user space script to decode pktlog trace events generated, it is desirable to know which firmware variant has provided the events and thereby decode the pktlogs appropriately. Hardware revision (hw_rev) helps to determine the firmware variant sending these trace events. So add hw_rev to trace events. 
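As a purely illustrative aside, a unified user-space decode script could branch on the new hw_type field roughly as below; the enum values and the two decode helpers are hypothetical placeholders, not part of this patch or of any existing ath10k tool.

#include <stdint.h>
#include <stddef.h>

/* hypothetical per-firmware-layout decoders (stubs for the sketch) */
static void decode_pktlog_10_2(const uint8_t *buf, size_t len) { (void)buf; (void)len; /* 10.1/10.2.x parsing would go here */ }
static void decode_pktlog_10_4(const uint8_t *buf, size_t len) { (void)buf; (void)len; /* 10.4 parsing would go here */ }

/* hw_type carries ar->hw_rev; the values here are illustrative only */
enum { HW_REV_QCA988X = 0, HW_REV_QCA6174 = 1, HW_REV_QCA99X0 = 2 };

static void decode_pktlog(uint8_t hw_type, const uint8_t *buf, size_t len)
{
        switch (hw_type) {
        case HW_REV_QCA99X0:
                decode_pktlog_10_4(buf, len);   /* 10.4 firmware layout */
                break;
        default:
                decode_pktlog_10_2(buf, len);   /* 10.1/10.2.x firmware layout */
                break;
        }
}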
Signed-off-by: Ashok Raj Nagarajan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/trace.h | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h index 71bdb368813d..e0d00cef0bd8 100644 --- a/drivers/net/wireless/ath/ath10k/trace.h +++ b/drivers/net/wireless/ath/ath10k/trace.h @@ -250,6 +250,7 @@ TRACE_EVENT(ath10k_wmi_dbglog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type); __field(size_t, buf_len) __dynamic_array(u8, buf, buf_len) ), @@ -257,14 +258,16 @@ TRACE_EVENT(ath10k_wmi_dbglog, TP_fast_assign( __assign_str(device, dev_name(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev)); + __entry->hw_type = ar->hw_rev; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(buf), buf, buf_len); ), TP_printk( - "%s %s len %zu", + "%s %s %d len %zu", __get_str(driver), __get_str(device), + __entry->hw_type, __entry->buf_len ) ); @@ -277,6 +280,7 @@ TRACE_EVENT(ath10k_htt_pktlog, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type); __field(u16, buf_len) __dynamic_array(u8, pktlog, buf_len) ), @@ -284,14 +288,16 @@ TRACE_EVENT(ath10k_htt_pktlog, TP_fast_assign( __assign_str(device, dev_name(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev)); + __entry->hw_type = ar->hw_rev; __entry->buf_len = buf_len; memcpy(__get_dynamic_array(pktlog), buf, buf_len); ), TP_printk( - "%s %s size %hu", + "%s %s %d size %hu", __get_str(driver), __get_str(device), + __entry->hw_type, __entry->buf_len ) ); @@ -440,6 +446,7 @@ TRACE_EVENT(ath10k_htt_rx_desc, TP_STRUCT__entry( __string(device, dev_name(ar->dev)) __string(driver, dev_driver_string(ar->dev)) + __field(u8, hw_type); __field(u16, len) __dynamic_array(u8, rxdesc, len) ), @@ -447,14 +454,16 @@ TRACE_EVENT(ath10k_htt_rx_desc, TP_fast_assign( __assign_str(device, dev_name(ar->dev)); __assign_str(driver, dev_driver_string(ar->dev)); + __entry->hw_type = ar->hw_rev; __entry->len = len; memcpy(__get_dynamic_array(rxdesc), data, len); ), TP_printk( - "%s %s rxdesc len %d", + "%s %s %d rxdesc len %d", __get_str(driver), __get_str(device), + __entry->hw_type, __entry->len ) ); -- cgit v1.2.3 From 22baa98097df3eb92a51e8661fda5dd7c0f1eb93 Mon Sep 17 00:00:00 2001 From: Anton Protopopov Date: Wed, 10 Feb 2016 11:58:55 -0500 Subject: ath10k: fix erroneous return value The ath10k_pci_hif_exchange_bmi_msg() function may return the positive value EIO instead of -EIO in case of error. Signed-off-by: Anton Protopopov Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index 0e338b657210..b3cff1d3364a 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -1772,7 +1772,7 @@ int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, DMA_FROM_DEVICE); ret = dma_mapping_error(ar->dev, resp_paddr); if (ret) { - ret = EIO; + ret = -EIO; goto err_req; } -- cgit v1.2.3 From e7827e512af01c6220e2f1cc53853f11cd742eef Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Fri, 12 Feb 2016 11:40:58 +0530 Subject: ath10k: reduce rx_lock contention for htt rx indication Received frame indications are queued into a skb list and latest processed by txrx tasklet. 
This skb queue is protected by htt rx lock. Since the entire rx processing till delivering frame to mac80211 and replenish tasks are processed under rx_lock protection, there might be some delay in queuing newly received rx frame into that list on multicore systems. Optimize this by using skb list lock while accessing rx completion queue instead of htt rx lock. Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index bf778f291133..a31de7b82da2 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2011,9 +2011,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IND: - spin_lock_bh(&htt->rx_ring.lock); - __skb_queue_tail(&htt->rx_compl_q, skb); - spin_unlock_bh(&htt->rx_ring.lock); + skb_queue_tail(&htt->rx_compl_q, skb); tasklet_schedule(&htt->txrx_compl_task); return; case HTT_T2H_MSG_TYPE_PEER_MAP: { @@ -2111,9 +2109,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) break; } case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: { - spin_lock_bh(&htt->rx_ring.lock); - __skb_queue_tail(&htt->rx_in_ord_compl_q, skb); - spin_unlock_bh(&htt->rx_ring.lock); + skb_queue_tail(&htt->rx_in_ord_compl_q, skb); tasklet_schedule(&htt->txrx_compl_task); return; } @@ -2170,16 +2166,18 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) dev_kfree_skb_any(skb); } - spin_lock_bh(&htt->rx_ring.lock); - while ((skb = __skb_dequeue(&htt->rx_compl_q))) { + while ((skb = skb_dequeue(&htt->rx_compl_q))) { resp = (struct htt_resp *)skb->data; + spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_handler(htt, &resp->rx_ind); + spin_unlock_bh(&htt->rx_ring.lock); dev_kfree_skb_any(skb); } - while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) { + while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { + spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_in_ord_ind(ar, skb); + spin_unlock_bh(&htt->rx_ring.lock); dev_kfree_skb_any(skb); } - spin_unlock_bh(&htt->rx_ring.lock); } -- cgit v1.2.3 From da6416cac6b9439167964cae1302d9cfe764bf1d Mon Sep 17 00:00:00 2001 From: Rajkumar Manoharan Date: Fri, 12 Feb 2016 11:40:59 +0530 Subject: ath10k: process htt rx indication as batch mode On multicore systems, it is possible that txrx tasket can run in parallel with pci tasklet (i.e smp affinity of ath10k irq is assigned to multiple CPUs). Feeding and consuming from the same rx completion list leads to txrx tasklet runs for longer period. Prevent this by processing a snapshot of rx queue by moving list into temporary list. Consecutive received frames will be processed in next batch. 
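The splice-then-drain pattern described above is generic enough to sketch on its own. Roughly, assuming a shared sk_buff_head fed from another context (process_one() is a placeholder for the real handler; this is a sketch of the idea, not the driver code in the diff below):

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>

static void process_one(struct sk_buff *skb);   /* placeholder handler */

static void drain_batch(struct sk_buff_head *shared)
{
        struct sk_buff_head local;
        struct sk_buff *skb;
        unsigned long flags;

        __skb_queue_head_init(&local);

        /* snapshot the shared queue under its own lock */
        spin_lock_irqsave(&shared->lock, flags);
        skb_queue_splice_init(shared, &local);
        spin_unlock_irqrestore(&shared->lock, flags);

        /* drain the private snapshot with no shared lock held */
        while ((skb = __skb_dequeue(&local)) != NULL) {
                process_one(skb);
                dev_kfree_skb_any(skb);
        }
}

Producers keep calling skb_queue_tail() on the shared list, so they only contend with the consumer for the duration of the splice.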
Signed-off-by: Rajkumar Manoharan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/htt_rx.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index a31de7b82da2..ae9b686a4e91 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -2151,22 +2151,34 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) struct ath10k_htt *htt = (struct ath10k_htt *)ptr; struct ath10k *ar = htt->ar; struct sk_buff_head tx_q; + struct sk_buff_head rx_q; + struct sk_buff_head rx_ind_q; struct htt_resp *resp; struct sk_buff *skb; unsigned long flags; __skb_queue_head_init(&tx_q); + __skb_queue_head_init(&rx_q); + __skb_queue_head_init(&rx_ind_q); spin_lock_irqsave(&htt->tx_compl_q.lock, flags); skb_queue_splice_init(&htt->tx_compl_q, &tx_q); spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags); + spin_lock_irqsave(&htt->rx_compl_q.lock, flags); + skb_queue_splice_init(&htt->rx_compl_q, &rx_q); + spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags); + + spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags); + skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q); + spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags); + while ((skb = __skb_dequeue(&tx_q))) { ath10k_htt_rx_frm_tx_compl(htt->ar, skb); dev_kfree_skb_any(skb); } - while ((skb = skb_dequeue(&htt->rx_compl_q))) { + while ((skb = __skb_dequeue(&rx_q))) { resp = (struct htt_resp *)skb->data; spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_handler(htt, &resp->rx_ind); @@ -2174,7 +2186,7 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr) dev_kfree_skb_any(skb); } - while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) { + while ((skb = __skb_dequeue(&rx_ind_q))) { spin_lock_bh(&htt->rx_ring.lock); ath10k_htt_rx_in_ord_ind(ar, skb); spin_unlock_bh(&htt->rx_ring.lock); -- cgit v1.2.3 From af9a6a3ad04cb042fe1b723335c411c1ab9b7959 Mon Sep 17 00:00:00 2001 From: Anilkumar Kolli Date: Tue, 23 Feb 2016 12:19:57 +0530 Subject: ath10k: reduce number of peers to support peer stats feature To enable per peer stats feature we are reducing the number of peers. Firmware has introduced tx stats feature. We have memory limitation in firmware to add these additional bytes. These are the new variables introduced in the firmware. ======== ======================= Variable Bytes required/per rate ======== ======================= TX success packets 1 TX failed packets 1 Retry packets 1 Success bytes 2 TX failed bytes 2 Retry bytes 2 Tx duration 4 Rate 1 Bw and AMPDU flags 1 Total 16 (because of allocation in word pattern) Firmware sends these tx_stats in pktlog. If we consider 4 feedbacks at a time, Frimware need about ~1K memory for coding and 8192 bytes required / per rate [ 4*16*128(peers)]. To accommodate this firmware needs to reduce 10 peers. This fixes a firmware crash with firmware-5.bin_10.2.4.70.22-2. 
Signed-off-by: Anilkumar Kolli Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/core.c | 9 +++++++-- drivers/net/wireless/ath/ath10k/hw.h | 5 +++++ drivers/net/wireless/ath/ath10k/wmi.c | 10 ++++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 4d3176492ae7..c84c2d30ef1f 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -1509,8 +1509,13 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) case ATH10K_FW_WMI_OP_VERSION_10_1: case ATH10K_FW_WMI_OP_VERSION_10_2: case ATH10K_FW_WMI_OP_VERSION_10_2_4: - ar->max_num_peers = TARGET_10X_NUM_PEERS; - ar->max_num_stations = TARGET_10X_NUM_STATIONS; + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS; + ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS; + } else { + ar->max_num_peers = TARGET_10X_NUM_PEERS; + ar->max_num_stations = TARGET_10X_NUM_STATIONS; + } ar->max_num_vdevs = TARGET_10X_NUM_VDEVS; ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC; ar->fw_stats_req_mask = WMI_STAT_PEER; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 2dece8db83f8..f0cfbc745c97 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -375,14 +375,19 @@ enum ath10k_hw_4addr_pad { #define TARGET_10X_MAC_AGGR_DELIM 0 #define TARGET_10X_AST_SKID_LIMIT 128 #define TARGET_10X_NUM_STATIONS 128 +#define TARGET_10X_TX_STATS_NUM_STATIONS 118 #define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \ (TARGET_10X_NUM_VDEVS)) +#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \ + (TARGET_10X_NUM_VDEVS)) #define TARGET_10X_NUM_OFFLOAD_PEERS 0 #define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0 #define TARGET_10X_NUM_PEER_KEYS 2 #define TARGET_10X_NUM_TIDS_MAX 256 #define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ (TARGET_10X_NUM_PEERS) * 2) +#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \ + (TARGET_10X_TX_STATS_NUM_PEERS) * 2) #define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2)) #define TARGET_10X_RX_TIMEOUT_LO_PRI 100 diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 1ce67423224a..70261387d1a5 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -5516,9 +5516,15 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar) u32 len, val, features; config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS); - config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS); - config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) { + config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS); + config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS); + } else { + config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS); + config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS); + } + config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT); config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK); config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK); -- cgit v1.2.3 From c28e6f06ff401ce8f37d58ff6d8123664cd65a51 Mon Sep 17 00:00:00 2001 From: Mohammed Shafi Shajakhan Date: 
Tue, 23 Feb 2016 12:58:36 +0530 Subject: ath10k: fix sanity check on enabling btcoex via debugfs First check for the device state before enabling / disabling btcoex, also return a proper error value. Enabling / disabling btcoex ideally does a f/w + ath10k_core_restart so the checks that are applicable for 'simulate_fw_crash' shall be applicable for this as well Signed-off-by: Mohammed Shafi Shajakhan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath10k/debug.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index a633152787e0..076d29b53ddf 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2114,6 +2114,7 @@ static ssize_t ath10k_write_btcoex(struct file *file, struct ath10k *ar = file->private_data; char buf[32]; size_t buf_size; + int ret = 0; bool val; buf_size = min(count, (sizeof(buf) - 1)); @@ -2127,6 +2128,12 @@ static ssize_t ath10k_write_btcoex(struct file *file, mutex_lock(&ar->conf_mutex); + if (ar->state != ATH10K_STATE_ON && + ar->state != ATH10K_STATE_RESTARTED) { + ret = -ENETDOWN; + goto exit; + } + if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) goto exit; @@ -2135,17 +2142,15 @@ static ssize_t ath10k_write_btcoex(struct file *file, else clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); - if (ar->state != ATH10K_STATE_ON) - goto exit; - ath10k_info(ar, "restarting firmware due to btcoex change"); queue_work(ar->workqueue, &ar->restart_work); + ret = count; exit: mutex_unlock(&ar->conf_mutex); - return count; + return ret; } static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf, -- cgit v1.2.3 From 82def495d118c6d36300c1cf2448c276273be603 Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Fri, 5 Feb 2016 09:45:50 +0800 Subject: ath9k: make NF load complete quickly and reliably Make NF load complete quickly and reliably. NF load execution is delayed by HW to end of frame if frame Rx or Tx is ongoing. Increasing timeout to max frame duration. If NF cal is ongoing before NF load, stop it before load, and restart it afterwards. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/calib.c | 38 ++++++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c index 3e2e24e4843f..37f6d66d1671 100644 --- a/drivers/net/wireless/ath/ath9k/calib.c +++ b/drivers/net/wireless/ath/ath9k/calib.c @@ -241,6 +241,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask; struct ath_common *common = ath9k_hw_common(ah); s16 default_nf = ath9k_hw_get_default_nf(ah, chan); + u32 bb_agc_ctl = REG_READ(ah, AR_PHY_AGC_CONTROL); if (ah->caldata) h = ah->caldata->nfCalHist; @@ -263,6 +264,16 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) } } + /* + * stop NF cal if ongoing to ensure NF load completes immediately + * (or after end rx/tx frame if ongoing) + */ + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) { + REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + REG_RMW_BUFFER_FLUSH(ah); + ENABLE_REG_RMW_BUFFER(ah); + } + /* * Load software filtered NF value into baseband internal minCCApwr * variable. 
@@ -276,17 +287,32 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) /* * Wait for load to complete, should be fast, a few 10s of us. - * The max delay was changed from an original 250us to 10000us - * since 250us often results in NF load timeout and causes deaf - * condition during stress testing 12/12/2009 + * The max delay was changed from an original 250us to 22.2 msec. + * This would increase timeout to the longest possible frame + * (11n max length 22.1 msec) */ - for (j = 0; j < 10000; j++) { + for (j = 0; j < 22200; j++) { if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & - AR_PHY_AGC_CONTROL_NF) == 0) + AR_PHY_AGC_CONTROL_NF) == 0) break; udelay(10); } + /* + * Restart NF so it can continue. + */ + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NF) { + ENABLE_REG_RMW_BUFFER(ah); + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_ENABLE_NF) + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_ENABLE_NF); + if (bb_agc_ctl & AR_PHY_AGC_CONTROL_NO_UPDATE_NF) + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, + AR_PHY_AGC_CONTROL_NO_UPDATE_NF); + REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); + REG_RMW_BUFFER_FLUSH(ah); + } + /* * We timed out waiting for the noisefloor to load, probably due to an * in-progress rx. Simply return here and allow the load plenty of time @@ -296,7 +322,7 @@ int ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan) * here, the baseband nf cal will just be capped by our present * noisefloor until the next calibration timer. */ - if (j == 10000) { + if (j == 22200) { ath_dbg(common, ANY, "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n", REG_READ(ah, AR_PHY_AGC_CONTROL)); -- cgit v1.2.3 From e9a26010f607621597f25e55d94faf85a2d0d5a4 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 12 Feb 2016 12:55:21 +0100 Subject: ath9k: reduce stack usage in ar9003_aic_cal_post_process In some configurations, this function uses more than the warning limit of 1024 bytes: drivers/net/wireless/ath/ath9k/ar9003_aic.c: In function 'ar9003_aic_cal_post_process': drivers/net/wireless/ath/ath9k/ar9003_aic.c:434:1: error: the frame size of 1040 bytes is larger than 1024 bytes [-Werror=frame-larger-than=] It turns out that there are two large arrays on the stack here, but almost all the data in them is never used outside of the loop in which it gets written, so we can replace the array with a single instance. The .valid flag is used later, so I'm replacing the array of structures with an array of bools. An obvious follow-up optimization would be to replace it with a bitmask and set_bit()/find_first_bit()/ find_last_bit()/... operations. However, I have not tested this patch, so I sticked to the simpler transformation that does the job of reducing the stack usage to a harmless level. 
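The bitmask follow-up suggested in the last paragraph could look roughly like the sketch below. This is an untested illustration of that suggestion, not the patch as merged; the ATH_AIC_MAX_BT_CHANNEL value is assumed here (the real define lives in ar9003_aic.h), and the helper names are made up.

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define ATH_AIC_MAX_BT_CHANNEL  78      /* assumed for the sketch */

static DECLARE_BITMAP(cal_sram_valid, ATH_AIC_MAX_BT_CHANNEL);

/* mark entry i valid instead of writing cal_sram_valid[i] = true */
static inline void aic_mark_valid(unsigned int i)
{
        set_bit(i, cal_sram_valid);
}

/* bitmap variant of ar9003_aic_find_valid(); returns -1 if nothing valid */
static int aic_find_valid_bit(bool dir, unsigned int index)
{
        unsigned long i;

        if (dir) {
                i = find_next_bit(cal_sram_valid, ATH_AIC_MAX_BT_CHANNEL,
                                  index + 1);
                if (i >= ATH_AIC_MAX_BT_CHANNEL)
                        return -1;
        } else {
                /* highest set bit strictly below index, or 'index' if none */
                i = find_last_bit(cal_sram_valid, index);
                if (i >= index)
                        return -1;
        }
        return i;
}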
Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_aic.c | 79 +++++++++++++++-------------- drivers/net/wireless/ath/ath9k/ar9003_aic.h | 1 - 2 files changed, 40 insertions(+), 40 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.c b/drivers/net/wireless/ath/ath9k/ar9003_aic.c index 1db119d77783..547cd46da260 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_aic.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.c @@ -53,19 +53,19 @@ static bool ar9003_hw_is_aic_enabled(struct ath_hw *ah) return true; } -static int16_t ar9003_aic_find_valid(struct ath_aic_sram_info *cal_sram, +static int16_t ar9003_aic_find_valid(bool *cal_sram_valid, bool dir, u8 index) { int16_t i; if (dir) { for (i = index + 1; i < ATH_AIC_MAX_BT_CHANNEL; i++) { - if (cal_sram[i].valid) + if (cal_sram_valid[i]) break; } } else { for (i = index - 1; i >= 0; i--) { - if (cal_sram[i].valid) + if (cal_sram_valid[i]) break; } } @@ -264,7 +264,7 @@ static u8 ar9003_aic_cal_start(struct ath_hw *ah, u8 min_valid_count) static bool ar9003_aic_cal_post_process(struct ath_hw *ah) { struct ath9k_hw_aic *aic = &ah->btcoex_hw.aic; - struct ath_aic_sram_info cal_sram[ATH_AIC_MAX_BT_CHANNEL]; + bool cal_sram_valid[ATH_AIC_MAX_BT_CHANNEL]; struct ath_aic_out_info aic_sram[ATH_AIC_MAX_BT_CHANNEL]; u32 dir_path_gain_idx, quad_path_gain_idx, value; u32 fixed_com_att_db; @@ -272,33 +272,34 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah) int16_t i; bool ret = true; - memset(&cal_sram, 0, sizeof(cal_sram)); + memset(&cal_sram_valid, 0, sizeof(cal_sram_valid)); memset(&aic_sram, 0, sizeof(aic_sram)); for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { + struct ath_aic_sram_info sram; value = aic->aic_sram[i]; - cal_sram[i].valid = + cal_sram_valid[i] = sram.valid = MS(value, AR_PHY_AIC_SRAM_VALID); - cal_sram[i].rot_quad_att_db = + sram.rot_quad_att_db = MS(value, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB); - cal_sram[i].vga_quad_sign = + sram.vga_quad_sign = MS(value, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN); - cal_sram[i].rot_dir_att_db = + sram.rot_dir_att_db = MS(value, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB); - cal_sram[i].vga_dir_sign = + sram.vga_dir_sign = MS(value, AR_PHY_AIC_SRAM_VGA_DIR_SIGN); - cal_sram[i].com_att_6db = + sram.com_att_6db = MS(value, AR_PHY_AIC_SRAM_COM_ATT_6DB); - if (cal_sram[i].valid) { - dir_path_gain_idx = cal_sram[i].rot_dir_att_db + - com_att_db_table[cal_sram[i].com_att_6db]; - quad_path_gain_idx = cal_sram[i].rot_quad_att_db + - com_att_db_table[cal_sram[i].com_att_6db]; + if (sram.valid) { + dir_path_gain_idx = sram.rot_dir_att_db + + com_att_db_table[sram.com_att_6db]; + quad_path_gain_idx = sram.rot_quad_att_db + + com_att_db_table[sram.com_att_6db]; - dir_path_sign = (cal_sram[i].vga_dir_sign) ? 1 : -1; - quad_path_sign = (cal_sram[i].vga_quad_sign) ? 1 : -1; + dir_path_sign = (sram.vga_dir_sign) ? 1 : -1; + quad_path_sign = (sram.vga_quad_sign) ? 
1 : -1; aic_sram[i].dir_path_gain_lin = dir_path_sign * aic_lin_table[dir_path_gain_idx]; @@ -310,16 +311,16 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah) for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { int16_t start_idx, end_idx; - if (cal_sram[i].valid) + if (cal_sram_valid[i]) continue; - start_idx = ar9003_aic_find_valid(cal_sram, 0, i); - end_idx = ar9003_aic_find_valid(cal_sram, 1, i); + start_idx = ar9003_aic_find_valid(cal_sram_valid, 0, i); + end_idx = ar9003_aic_find_valid(cal_sram_valid, 1, i); if (start_idx < 0) { /* extrapolation */ start_idx = end_idx; - end_idx = ar9003_aic_find_valid(cal_sram, 1, start_idx); + end_idx = ar9003_aic_find_valid(cal_sram_valid, 1, start_idx); if (end_idx < 0) { ret = false; @@ -342,7 +343,7 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah) if (end_idx < 0) { /* extrapolation */ - end_idx = ar9003_aic_find_valid(cal_sram, 0, start_idx); + end_idx = ar9003_aic_find_valid(cal_sram_valid, 0, start_idx); if (end_idx < 0) { ret = false; @@ -378,19 +379,21 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah) } /* From dir/quad_path_gain_lin to sram. */ - i = ar9003_aic_find_valid(cal_sram, 1, 0); + i = ar9003_aic_find_valid(cal_sram_valid, 1, 0); if (i < 0) { i = 0; ret = false; } - fixed_com_att_db = com_att_db_table[cal_sram[i].com_att_6db]; + fixed_com_att_db = com_att_db_table[MS(aic->aic_sram[i], + AR_PHY_AIC_SRAM_COM_ATT_6DB)]; for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { int16_t rot_dir_path_att_db, rot_quad_path_att_db; + struct ath_aic_sram_info sram; - aic_sram[i].sram.vga_dir_sign = + sram.vga_dir_sign = (aic_sram[i].dir_path_gain_lin >= 0) ? 1 : 0; - aic_sram[i].sram.vga_quad_sign= + sram.vga_quad_sign = (aic_sram[i].quad_path_gain_lin >= 0) ? 1 : 0; rot_dir_path_att_db = @@ -400,33 +403,31 @@ static bool ar9003_aic_cal_post_process(struct ath_hw *ah) ar9003_aic_find_index(0, abs(aic_sram[i].quad_path_gain_lin)) - fixed_com_att_db; - aic_sram[i].sram.com_att_6db = + sram.com_att_6db = ar9003_aic_find_index(1, fixed_com_att_db); - aic_sram[i].sram.valid = 1; + sram.valid = 1; - aic_sram[i].sram.rot_dir_att_db = + sram.rot_dir_att_db = min(max(rot_dir_path_att_db, (int16_t)ATH_AIC_MIN_ROT_DIR_ATT_DB), ATH_AIC_MAX_ROT_DIR_ATT_DB); - aic_sram[i].sram.rot_quad_att_db = + sram.rot_quad_att_db = min(max(rot_quad_path_att_db, (int16_t)ATH_AIC_MIN_ROT_QUAD_ATT_DB), ATH_AIC_MAX_ROT_QUAD_ATT_DB); - } - for (i = 0; i < ATH_AIC_MAX_BT_CHANNEL; i++) { - aic->aic_sram[i] = (SM(aic_sram[i].sram.vga_dir_sign, + aic->aic_sram[i] = (SM(sram.vga_dir_sign, AR_PHY_AIC_SRAM_VGA_DIR_SIGN) | - SM(aic_sram[i].sram.vga_quad_sign, + SM(sram.vga_quad_sign, AR_PHY_AIC_SRAM_VGA_QUAD_SIGN) | - SM(aic_sram[i].sram.com_att_6db, + SM(sram.com_att_6db, AR_PHY_AIC_SRAM_COM_ATT_6DB) | - SM(aic_sram[i].sram.valid, + SM(sram.valid, AR_PHY_AIC_SRAM_VALID) | - SM(aic_sram[i].sram.rot_dir_att_db, + SM(sram.rot_dir_att_db, AR_PHY_AIC_SRAM_ROT_DIR_ATT_DB) | - SM(aic_sram[i].sram.rot_quad_att_db, + SM(sram.rot_quad_att_db, AR_PHY_AIC_SRAM_ROT_QUAD_ATT_DB)); } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_aic.h b/drivers/net/wireless/ath/ath9k/ar9003_aic.h index 86f40644be43..9512c63799f2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_aic.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_aic.h @@ -50,7 +50,6 @@ struct ath_aic_sram_info { struct ath_aic_out_info { int16_t dir_path_gain_lin; int16_t quad_path_gain_lin; - struct ath_aic_sram_info sram; }; u8 ar9003_aic_calibration(struct ath_hw *ah); -- cgit v1.2.3 From 
25c0f301425cecdc51d7cc6eb4fb163d464236bc Mon Sep 17 00:00:00 2001 From: Miaoqing Pan Date: Thu, 18 Feb 2016 17:20:02 +0800 Subject: ath9k: clear bb filter calibration power threshold JP WiFi certification for bandwidth of channel 14 failed, the OBW is lower than the requirement. Clear the bb filter calibration power threshold to increase OBW(+2). The fix only for qca9531 chip now. Signed-off-by: Miaoqing Pan Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 7 ++++++- drivers/net/wireless/ath/ath9k/ar9003_phy.h | 3 +++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index abd964691d8c..06c1ca6e8290 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -976,9 +976,14 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, /* * JAPAN regulatory. */ - if (chan->channel == 2484) + if (chan->channel == 2484) { ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1); + if (AR_SREV_9531(ah)) + REG_RMW_FIELD(ah, AR_PHY_FCAL_2_0, + AR_PHY_FLC_PWR_THRESH, 0); + } + ah->modes_index = modesIndex; ar9003_hw_override_ini(ah); ar9003_hw_set_channel_regs(ah, chan); diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h index c5f8bc4b5595..566da789f97e 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -487,6 +487,9 @@ #define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150) #define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158) +#define AR_PHY_FLC_PWR_THRESH 7 +#define AR_PHY_FLC_PWR_THRESH_S 0 + #define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3 #define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0 -- cgit v1.2.3 From 5eb4dce3b3471ec9d1ea2945fa3d2bab4ac7e100 Mon Sep 17 00:00:00 2001 From: John Fastabend Date: Mon, 29 Feb 2016 11:26:13 -0800 Subject: net: relax setup_tc ndo op handle restriction I added this check in setup_tc to multiple drivers, if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) Unfortunately restricting to TC_H_ROOT like this breaks the old instantiation of mqprio to setup a hardware qdisc. This patch relaxes the test to only check the type to make it equivalent to the check before I broke it. With this the old instantiation continues to work. A good smoke test is to setup mqprio with, # tc qdisc add dev eth4 root mqprio num_tc 8 \ map 0 1 2 3 4 5 6 7 \ queues 0@0 1@1 2@2 3@3 4@4 5@5 6@6 7@7 Fixes: e4c6734eaab9 ("net: rework ndo tc op to consume additional qdisc handle paramete") Reported-by: Singh Krishneil Reported-by: Jake Keller CC: Murali Karicheri CC: Shradha Shah CC: Or Gerlitz CC: Ariel Elior CC: Jeff Kirsher CC: Bruce Allan CC: Jesse Brandeburg CC: Don Skidmore Signed-off-by: John Fastabend Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 2 +- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 2 +- drivers/net/ethernet/sfc/tx.c | 2 +- drivers/net/ethernet/ti/netcp_core.c | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 33606840ae15..ebf9224b2d31 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1632,7 +1632,7 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto, struct xgbe_prv_data *pdata = netdev_priv(netdev); u8 tc; - if (handle != TC_H_ROOT || tc_to_netdev->type != TC_SETUP_MQPRIO) + if (tc_to_netdev->type != TC_SETUP_MQPRIO) return -EINVAL; tc = tc_to_netdev->tc; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 45843d150868..a949783c8fc3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -4275,7 +4275,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) int __bnx2x_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; return bnx2x_setup_tc(dev, tc->tc); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aa6a3189caca..ce6b075842ee 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5447,7 +5447,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct bnxt *bp = netdev_priv(dev); u8 tc; - if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO) + if (ntc->type != TC_SETUP_MQPRIO) return -EINVAL; tc = ntc->tc; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c index dc1a82148ff0..d09a8dd71fc2 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c @@ -1207,7 +1207,7 @@ err_queueing_scheme: static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; return fm10k_setup_tc(dev, tc->tc); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index b893ff8e65f5..4d6223da4a19 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8419,7 +8419,7 @@ int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto, } } - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; return ixgbe_setup_tc(dev, tc->tc); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e26b110e27da..16b26d17c54c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -73,7 +73,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) static int __mlx4_en_setup_tc(struct net_device *dev, u32 handle, __be16 proto, 
struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; return mlx4_en_setup_tc(dev, tc->tc); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 2cdb5718ed66..233778911557 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -571,7 +571,7 @@ int efx_setup_tc(struct net_device *net_dev, u32 handle, __be16 proto, unsigned tc, num_tc; int rc; - if (handle != TC_H_ROOT || ntc->type != TC_SETUP_MQPRIO) + if (ntc->type != TC_SETUP_MQPRIO) return -EINVAL; num_tc = ntc->tc; diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index ed0c30f590d4..1d0942c53120 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1860,7 +1860,7 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto, /* setup tc must be called under rtnl lock */ ASSERT_RTNL(); - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; /* Sanity-check the number of traffic classes requested */ -- cgit v1.2.3 From 3d1cbe839ac3aa7b23a274b691092c90f9bf1b8e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 2 Mar 2016 10:40:54 +0100 Subject: net: mellanox: add DEVLINK dependencies The new NET_DEVLINK infrastructure can be a loadable module, but the drivers using it might be built-in, which causes link errors like: drivers/net/built-in.o: In function `mlx4_load_one': :(.text+0x2fbfda): undefined reference to `devlink_port_register' :(.text+0x2fc084): undefined reference to `devlink_port_unregister' drivers/net/built-in.o: In function `mlxsw_sx_port_remove': :(.text+0x33a03a): undefined reference to `devlink_port_type_clear' :(.text+0x33a04e): undefined reference to `devlink_port_unregister' There are multiple ways to avoid this: a) add 'depends on NET_DEVLINK || !NET_DEVLINK' dependencies for each user b) use 'select NET_DEVLINK' from each driver that uses it and hide the symbol in Kconfig. c) make NET_DEVLINK a 'bool' option so we don't have to list it as a dependency, and rely on the APIs to be stubbed out when it is disabled d) use IS_REACHABLE() rather than IS_ENABLED() to check for NET_DEVLINK in include/net/devlink.h This implements a variation of approach a) by adding an intermediate symbol that drivers can depend on, and changes the three drivers using it. Signed-off-by: Arnd Bergmann Fixes: 09d4d087cd48 ("mlx4: Implement devlink interface") Fixes: c4745500e988 ("mlxsw: Implement devlink interface") Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/infiniband/hw/mlx4/Kconfig | 1 + drivers/net/ethernet/mellanox/mlx4/Kconfig | 1 + drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1 + net/Kconfig | 9 +++++++++ 4 files changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig index fc01deac1d3c..db4aa13ebae0 100644 --- a/drivers/infiniband/hw/mlx4/Kconfig +++ b/drivers/infiniband/hw/mlx4/Kconfig @@ -1,6 +1,7 @@ config MLX4_INFINIBAND tristate "Mellanox ConnectX HCA support" depends on NETDEVICES && ETHERNET && PCI && INET + depends on MAY_USE_DEVLINK select NET_VENDOR_MELLANOX select MLX4_CORE ---help--- diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 1486ce902a56..9ca3734ebb6b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -4,6 +4,7 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" + depends on MAY_USE_DEVLINK depends on PCI select MLX4_CORE select PTP_1588_CLOCK diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index ce26adcb4988..2ad7f67854d5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -4,6 +4,7 @@ config MLXSW_CORE tristate "Mellanox Technologies Switch ASICs support" + depends on MAY_USE_DEVLINK ---help--- This driver supports Mellanox Technologies Switch ASICs family. diff --git a/net/Kconfig b/net/Kconfig index 6c9cfb0d7639..2760825e53fa 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -403,6 +403,15 @@ config NET_DEVLINK infrastructure to support access to physical chip-wide config and monitoring. +config MAY_USE_DEVLINK + tristate + default m if NET_DEVLINK=m + default y if NET_DEVLINK=y || NET_DEVLINK=n + help + Drivers using the devlink infrastructure should have a dependency + on MAY_USE_DEVLINK to ensure they do not cause link errors when + devlink is a loadable module and the driver using it is built-in. + endif # if NET # Used by archs to tell that they support BPF_JIT -- cgit v1.2.3 From 274ba628a32ee15bb8e08b8a9240c354066ed24a Mon Sep 17 00:00:00 2001 From: Simon Horman Date: Wed, 2 Mar 2016 10:28:13 +0900 Subject: sh_eth, ravb: Use ARCH_RENESAS Make use of ARCH_RENESAS in place of ARCH_SHMOBILE. This is part of an ongoing process to migrate from ARCH_SHMOBILE to ARCH_RENESAS the motivation for which being that RENESAS seems to be a more appropriate name than SHMOBILE for the majority of Renesas ARM based SoCs. Signed-off-by: Simon Horman Acked-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller --- drivers/net/ethernet/renesas/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 270c4c9cac7f..4f132cf177cd 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_RENESAS config SH_ETH tristate "Renesas SuperH Ethernet support" depends on HAS_DMA - depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST + depends on ARCH_RENESAS || SUPERH || COMPILE_TEST select CRC32 select MII select MDIO_BITBANG @@ -32,7 +32,7 @@ config SH_ETH config RAVB tristate "Renesas Ethernet AVB support" depends on HAS_DMA - depends on ARCH_SHMOBILE || COMPILE_TEST + depends on ARCH_RENESAS || COMPILE_TEST select CRC32 select MII select MDIO_BITBANG -- cgit v1.2.3 From b54b8c2d6e3cd1db17405a402ba42da5a9c8bf1d Mon Sep 17 00:00:00 2001 From: Lada Trimasova Date: Thu, 3 Mar 2016 17:07:46 +0300 Subject: net: ezchip: adapt driver to little endian architecture Since ezchip network driver is written with big endian EZChip platform it is necessary to add support for little endian architecture. The first issue is that the order of the bits in a bit field is implementation specific. So all the bit fields are removed. Named constants are used to access necessary fields. And the second one is that network byte order is big endian. For example, data on ethernet is transmitted with most-significant octet (byte) first. So in case of little endian architecture it is important to swap data byte order when we read it from register. In case of unaligned access we can use "get_unaligned_be32" and in other case we can use function "ioread32_rep" which reads all data from register and works either with little endian or big endian architecture. And then when we are going to write data to register we need to restore byte order using the function "put_unaligned_be32" in case of unaligned access and in other case "iowrite32_rep". The last little fix is a space between type and pointer to observe coding style. Signed-off-by: Lada Trimasova Cc: Alexey Brodkin Cc: Noam Camus Cc: Tal Zilcer Cc: Arnd Bergmann Acked-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ezchip/nps_enet.c | 222 +++++++++++---------- drivers/net/ethernet/ezchip/nps_enet.h | 348 +++++++++++---------------------- 2 files changed, 238 insertions(+), 332 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c index b1026689b78f..1f23845a0694 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.c +++ b/drivers/net/ethernet/ezchip/nps_enet.c @@ -43,20 +43,21 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev, bool dst_is_aligned = IS_ALIGNED((unsigned long)dst, sizeof(u32)); /* In case dst is not aligned we need an intermediate buffer */ - if (dst_is_aligned) - for (i = 0; i < len; i++, reg++) - *reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); + if (dst_is_aligned) { + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len); + reg += len; + } else { /* !dst_is_aligned */ for (i = 0; i < len; i++, reg++) { u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - put_unaligned(buf, reg); + put_unaligned_be32(buf, reg); } } - /* copy last bytes (if any) */ if (last) { - u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF); - memcpy((u8*)reg, &buf, last); + u32 buf; + ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1); + memcpy((u8 *)reg, &buf, last); } } @@ -66,26 +67,28 @@ static u32 nps_enet_rx_handler(struct net_device *ndev) u32 work_done = 0; struct nps_enet_priv *priv = netdev_priv(ndev); struct sk_buff *skb; - struct nps_enet_rx_ctl rx_ctrl; + u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); + u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; + u32 rx_ctrl_er = (rx_ctrl_value & RX_CTL_ER_MASK) >> RX_CTL_ER_SHIFT; + u32 rx_ctrl_crc = (rx_ctrl_value & RX_CTL_CRC_MASK) >> RX_CTL_CRC_SHIFT; - rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - frame_len = rx_ctrl.nr; + frame_len = (rx_ctrl_value & RX_CTL_NR_MASK) >> RX_CTL_NR_SHIFT; /* Check if we got RX */ - if (!rx_ctrl.cr) + if (!rx_ctrl_cr) return work_done; /* If we got here there is a work for us */ work_done++; /* Check Rx error */ - if (rx_ctrl.er) { + if (rx_ctrl_er) { ndev->stats.rx_errors++; err = 1; } /* Check Rx CRC error */ - if (rx_ctrl.crc) { + if (rx_ctrl_crc) { ndev->stats.rx_crc_errors++; ndev->stats.rx_dropped++; err = 1; @@ -136,23 +139,24 @@ rx_irq_frame_done: static void nps_enet_tx_handler(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_tx_ctl tx_ctrl; - - tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT; + u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT; /* Check if we got TX */ - if (!priv->tx_packet_sent || tx_ctrl.ct) + if (!priv->tx_packet_sent || tx_ctrl_ct) return; /* Ack Tx ctrl register */ nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, 0); /* Check Tx transmit error */ - if (unlikely(tx_ctrl.et)) { + if (unlikely(tx_ctrl_et)) { ndev->stats.tx_errors++; } else { ndev->stats.tx_packets++; - ndev->stats.tx_bytes += tx_ctrl.nt; + ndev->stats.tx_bytes += tx_ctrl_nt; } dev_kfree_skb(priv->tx_skb); @@ -178,13 +182,16 @@ static int nps_enet_poll(struct napi_struct *napi, int budget) nps_enet_tx_handler(ndev); work_done = nps_enet_rx_handler(ndev); if (work_done < budget) { - struct nps_enet_buf_int_enable buf_int_enable; + u32 buf_int_enable_value = 
0; napi_complete(napi); - buf_int_enable.rx_rdy = NPS_ENET_ENABLE; - buf_int_enable.tx_done = NPS_ENET_ENABLE; + + /* set tx_done and rx_rdy bits */ + buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; + buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; + nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, - buf_int_enable.value); + buf_int_enable_value); } return work_done; @@ -205,13 +212,12 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) { struct net_device *ndev = dev_instance; struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_rx_ctl rx_ctrl; - struct nps_enet_tx_ctl tx_ctrl; - - rx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); - tx_ctrl.value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL); + u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL); + u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT; + u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT; - if ((!tx_ctrl.ct && priv->tx_packet_sent) || rx_ctrl.cr) + if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr) if (likely(napi_schedule_prep(&priv->napi))) { nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0); __napi_schedule(&priv->napi); @@ -223,22 +229,24 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance) static void nps_enet_set_hw_mac_address(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_mac_cfg_1 ge_mac_cfg_1; - struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2; + u32 ge_mac_cfg_1_value = 0; + u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value; /* set MAC address in HW */ - ge_mac_cfg_1.octet_0 = ndev->dev_addr[0]; - ge_mac_cfg_1.octet_1 = ndev->dev_addr[1]; - ge_mac_cfg_1.octet_2 = ndev->dev_addr[2]; - ge_mac_cfg_1.octet_3 = ndev->dev_addr[3]; - ge_mac_cfg_2->octet_4 = ndev->dev_addr[4]; - ge_mac_cfg_2->octet_5 = ndev->dev_addr[5]; + ge_mac_cfg_1_value |= ndev->dev_addr[0] << CFG_1_OCTET_0_SHIFT; + ge_mac_cfg_1_value |= ndev->dev_addr[1] << CFG_1_OCTET_1_SHIFT; + ge_mac_cfg_1_value |= ndev->dev_addr[2] << CFG_1_OCTET_2_SHIFT; + ge_mac_cfg_1_value |= ndev->dev_addr[3] << CFG_1_OCTET_3_SHIFT; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_4_MASK) + | ndev->dev_addr[4] << CFG_2_OCTET_4_SHIFT; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_OCTET_5_MASK) + | ndev->dev_addr[5] << CFG_2_OCTET_5_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_1, - ge_mac_cfg_1.value); + ge_mac_cfg_1_value); nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, - ge_mac_cfg_2->value); + *ge_mac_cfg_2_value); } /** @@ -254,93 +262,97 @@ static void nps_enet_set_hw_mac_address(struct net_device *ndev) static void nps_enet_hw_reset(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_rst ge_rst; - struct nps_enet_phase_fifo_ctl phase_fifo_ctl; + u32 ge_rst_value = 0, phase_fifo_ctl_value = 0; - ge_rst.value = 0; - phase_fifo_ctl.value = 0; /* Pcs reset sequence*/ - ge_rst.gmac_0 = NPS_ENET_ENABLE; - nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value); + ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT; + nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value); usleep_range(10, 20); - ge_rst.value = 0; - nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst.value); + nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value); /* Tx fifo reset sequence */ - phase_fifo_ctl.rst = NPS_ENET_ENABLE; - phase_fifo_ctl.init = 
NPS_ENET_ENABLE; + phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_RST_SHIFT; + phase_fifo_ctl_value |= NPS_ENET_ENABLE << PHASE_FIFO_CTL_INIT_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL, - phase_fifo_ctl.value); + phase_fifo_ctl_value); usleep_range(10, 20); - phase_fifo_ctl.value = 0; + phase_fifo_ctl_value = 0; nps_enet_reg_set(priv, NPS_ENET_REG_PHASE_FIFO_CTL, - phase_fifo_ctl.value); + phase_fifo_ctl_value); } static void nps_enet_hw_enable_control(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_mac_cfg_0 ge_mac_cfg_0; - struct nps_enet_buf_int_enable buf_int_enable; - struct nps_enet_ge_mac_cfg_2 *ge_mac_cfg_2 = &priv->ge_mac_cfg_2; - struct nps_enet_ge_mac_cfg_3 *ge_mac_cfg_3 = &priv->ge_mac_cfg_3; + u32 ge_mac_cfg_0_value = 0, buf_int_enable_value = 0; + u32 *ge_mac_cfg_2_value = &priv->ge_mac_cfg_2_value; + u32 *ge_mac_cfg_3_value = &priv->ge_mac_cfg_3_value; s32 max_frame_length; - ge_mac_cfg_0.value = 0; - buf_int_enable.value = 0; /* Enable Rx and Tx statistics */ - ge_mac_cfg_2->stat_en = NPS_ENET_GE_MAC_CFG_2_STAT_EN; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_STAT_EN_MASK) + | NPS_ENET_GE_MAC_CFG_2_STAT_EN << CFG_2_STAT_EN_SHIFT; /* Discard packets with different MAC address */ - ge_mac_cfg_2->disc_da = NPS_ENET_ENABLE; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; /* Discard multicast packets */ - ge_mac_cfg_2->disc_mc = NPS_ENET_ENABLE; + *ge_mac_cfg_2_value = (*ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, - ge_mac_cfg_2->value); + *ge_mac_cfg_2_value); /* Discard Packets bigger than max frame length */ max_frame_length = ETH_HLEN + ndev->mtu + ETH_FCS_LEN; - if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) - ge_mac_cfg_3->max_len = max_frame_length; + if (max_frame_length <= NPS_ENET_MAX_FRAME_LENGTH) { + *ge_mac_cfg_3_value = + (*ge_mac_cfg_3_value & ~CFG_3_MAX_LEN_MASK) + | max_frame_length << CFG_3_MAX_LEN_SHIFT; + } /* Enable interrupts */ - buf_int_enable.rx_rdy = NPS_ENET_ENABLE; - buf_int_enable.tx_done = NPS_ENET_ENABLE; + buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT; + buf_int_enable_value |= NPS_ENET_ENABLE << TX_DONE_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, - buf_int_enable.value); + buf_int_enable_value); /* Write device MAC address to HW */ nps_enet_set_hw_mac_address(ndev); /* Rx and Tx HW features */ - ge_mac_cfg_0.tx_pad_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_crc_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.rx_crc_strip = NPS_ENET_ENABLE; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_PAD_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_CRC_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_CRC_STRIP_SHIFT; /* IFG configuration */ - ge_mac_cfg_0.rx_ifg = NPS_ENET_GE_MAC_CFG_0_RX_IFG; - ge_mac_cfg_0.tx_ifg = NPS_ENET_GE_MAC_CFG_0_TX_IFG; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_RX_IFG << CFG_0_RX_IFG_SHIFT; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_TX_IFG << CFG_0_TX_IFG_SHIFT; /* preamble configuration */ - ge_mac_cfg_0.rx_pr_check_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_pr_len = NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_PR_CHECK_EN_SHIFT; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_TX_PR_LEN << CFG_0_TX_PR_LEN_SHIFT; /* enable flow control frames */ - ge_mac_cfg_0.tx_fc_en = NPS_ENET_ENABLE; - 
ge_mac_cfg_0.rx_fc_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_fc_retr = NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR; - ge_mac_cfg_3->cf_drop = NPS_ENET_ENABLE; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_FC_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_FC_EN_SHIFT; + ge_mac_cfg_0_value |= + NPS_ENET_GE_MAC_CFG_0_TX_FC_RETR << CFG_0_TX_FC_RETR_SHIFT; + *ge_mac_cfg_3_value = (*ge_mac_cfg_3_value & ~CFG_3_CF_DROP_MASK) + | NPS_ENET_ENABLE << CFG_3_CF_DROP_SHIFT; /* Enable Rx and Tx */ - ge_mac_cfg_0.rx_en = NPS_ENET_ENABLE; - ge_mac_cfg_0.tx_en = NPS_ENET_ENABLE; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_RX_EN_SHIFT; + ge_mac_cfg_0_value |= NPS_ENET_ENABLE << CFG_0_TX_EN_SHIFT; nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_3, - ge_mac_cfg_3->value); + *ge_mac_cfg_3_value); nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_0, - ge_mac_cfg_0.value); + ge_mac_cfg_0_value); } static void nps_enet_hw_disable_control(struct net_device *ndev) @@ -358,31 +370,28 @@ static void nps_enet_send_frame(struct net_device *ndev, struct sk_buff *skb) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_tx_ctl tx_ctrl; + u32 tx_ctrl_value = 0; short length = skb->len; u32 i, len = DIV_ROUND_UP(length, sizeof(u32)); u32 *src = (void *)skb->data; bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32)); - tx_ctrl.value = 0; /* In case src is not aligned we need an intermediate buffer */ if (src_is_aligned) - for (i = 0; i < len; i++, src++) - nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src); + iowrite32_rep(priv->regs_base + NPS_ENET_REG_TX_BUF, src, len); else /* !src_is_aligned */ for (i = 0; i < len; i++, src++) nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, - get_unaligned(src)); + get_unaligned_be32(src)); /* Write the length of the Frame */ - tx_ctrl.nt = length; + tx_ctrl_value |= length << TX_CTL_NT_SHIFT; /* Indicate SW is done */ priv->tx_packet_sent = true; - tx_ctrl.ct = NPS_ENET_ENABLE; - + tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT; /* Send Frame */ - nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl.value); + nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value); } /** @@ -422,19 +431,23 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p) static void nps_enet_set_rx_mode(struct net_device *ndev) { struct nps_enet_priv *priv = netdev_priv(ndev); - struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2; - - ge_mac_cfg_2.value = priv->ge_mac_cfg_2.value; + u32 ge_mac_cfg_2_value = priv->ge_mac_cfg_2_value; if (ndev->flags & IFF_PROMISC) { - ge_mac_cfg_2.disc_da = NPS_ENET_DISABLE; - ge_mac_cfg_2.disc_mc = NPS_ENET_DISABLE; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK) + | NPS_ENET_DISABLE << CFG_2_DISK_DA_SHIFT; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) + | NPS_ENET_DISABLE << CFG_2_DISK_MC_SHIFT; + } else { - ge_mac_cfg_2.disc_da = NPS_ENET_ENABLE; - ge_mac_cfg_2.disc_mc = NPS_ENET_ENABLE; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_DA_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT; + ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK) + | NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT; + } - nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2.value); + nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value); } /** @@ -453,12 +466,15 @@ static s32 nps_enet_open(struct net_device *ndev) /* Reset private variables */ priv->tx_packet_sent = false; - priv->ge_mac_cfg_2.value = 0; - priv->ge_mac_cfg_3.value = 0; + priv->ge_mac_cfg_2_value = 0; + 
priv->ge_mac_cfg_3_value = 0; /* ge_mac_cfg_3 default values */ - priv->ge_mac_cfg_3.rx_ifg_th = NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH; - priv->ge_mac_cfg_3.max_len = NPS_ENET_GE_MAC_CFG_3_MAX_LEN; + priv->ge_mac_cfg_3_value |= + NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT; + + priv->ge_mac_cfg_3_value |= + NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT; /* Disable HW device */ nps_enet_hw_disable_control(ndev); diff --git a/drivers/net/ethernet/ezchip/nps_enet.h b/drivers/net/ethernet/ezchip/nps_enet.h index 6703674d679c..d0cab600bce8 100644 --- a/drivers/net/ethernet/ezchip/nps_enet.h +++ b/drivers/net/ethernet/ezchip/nps_enet.h @@ -43,233 +43,123 @@ #define NPS_ENET_REG_GE_RST 0x1400 #define NPS_ENET_REG_PHASE_FIFO_CTL 0x1404 -/* Tx control register */ -struct nps_enet_tx_ctl { - union { - /* ct: SW sets to indicate frame ready in Tx buffer for - * transmission. HW resets to when transmission done - * et: Transmit error - * nt: Length in bytes of Tx frame loaded to Tx buffer - */ - struct { - u32 - __reserved_1:16, - ct:1, - et:1, - __reserved_2:3, - nt:11; - }; - - u32 value; - }; -}; - -/* Rx control register */ -struct nps_enet_rx_ctl { - union { - /* cr: HW sets to indicate frame ready in Rx buffer. - * SW resets to indicate host read received frame - * and new frames can be written to Rx buffer - * er: Rx error indication - * crc: Rx CRC error indication - * nr: Length in bytes of Rx frame loaded by MAC to Rx buffer - */ - struct { - u32 - __reserved_1:16, - cr:1, - er:1, - crc:1, - __reserved_2:2, - nr:11; - }; - - u32 value; - }; -}; - -/* Interrupt enable for data buffer events register */ -struct nps_enet_buf_int_enable { - union { - /* tx_done: Interrupt generation in the case when new frame - * is ready in Rx buffer - * rx_rdy: Interrupt generation in the case when current frame - * was read from TX buffer - */ - struct { - u32 - __reserved:30, - tx_done:1, - rx_rdy:1; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 0 register */ -struct nps_enet_ge_mac_cfg_0 { - union { - /* tx_pr_len: Transmit preamble length in bytes - * tx_ifg_nib: Tx idle pattern - * nib_mode: Nibble (4-bit) Mode - * rx_pr_check_en: Receive preamble Check Enable - * tx_ifg: Transmit inter-Frame Gap - * rx_ifg: Receive inter-Frame Gap - * tx_fc_retr: Transmit Flow Control Retransmit Mode - * rx_length_check_en: Receive Length Check Enable - * rx_crc_ignore: Results of the CRC check are ignored - * rx_crc_strip: MAC strips the CRC from received frames - * rx_fc_en: Receive Flow Control Enable - * tx_crc_en: Transmit CRC Enabled - * tx_pad_en: Transmit Padding Enable - * tx_cf_en: Transmit Flow Control Enable - * tx_en: Transmit Enable - * rx_en: Receive Enable - */ - struct { - u32 - tx_pr_len:4, - tx_ifg_nib:4, - nib_mode:1, - rx_pr_check_en:1, - tx_ifg:6, - rx_ifg:4, - tx_fc_retr:3, - rx_length_check_en:1, - rx_crc_ignore:1, - rx_crc_strip:1, - rx_fc_en:1, - tx_crc_en:1, - tx_pad_en:1, - tx_fc_en:1, - tx_en:1, - rx_en:1; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 1 register */ -struct nps_enet_ge_mac_cfg_1 { - union { - /* octet_3: MAC address octet 3 - * octet_2: MAC address octet 2 - * octet_1: MAC address octet 1 - * octet_0: MAC address octet 0 - */ - struct { - u32 - octet_3:8, - octet_2:8, - octet_1:8, - octet_0:8; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 2 register */ -struct nps_enet_ge_mac_cfg_2 { - union { - /* transmit_flush_en: MAC flush enable - * stat_en: RMON statistics interface enable - * disc_da: Discard frames with DA 
different - * from MAC address - * disc_bc: Discard broadcast frames - * disc_mc: Discard multicast frames - * octet_5: MAC address octet 5 - * octet_4: MAC address octet 4 - */ - struct { - u32 - transmit_flush_en:1, - __reserved_1:5, - stat_en:2, - __reserved_2:1, - disc_da:1, - disc_bc:1, - disc_mc:1, - __reserved_3:4, - octet_5:8, - octet_4:8; - }; - - u32 value; - }; -}; - -/* Gbps Eth MAC Configuration 3 register */ -struct nps_enet_ge_mac_cfg_3 { - union { - /* ext_oob_cbfc_sel: Selects one of the 4 profiles for - * extended OOB in-flow-control indication - * max_len: Maximum receive frame length in bytes - * tx_cbfc_en: Enable transmission of class-based - * flow control packets - * rx_ifg_th: Threshold for IFG status reporting via OOB - * cf_timeout: Configurable time to decrement FC counters - * cf_drop: Drop control frames - * redirect_cbfc_sel: Selects one of CBFC redirect profiles - * rx_cbfc_redir_en: Enable Rx class-based flow - * control redirect - * rx_cbfc_en: Enable Rx class-based flow control - * tm_hd_mode: TM header mode - */ - struct { - u32 - ext_oob_cbfc_sel:2, - max_len:14, - tx_cbfc_en:1, - rx_ifg_th:5, - cf_timeout:4, - cf_drop:1, - redirect_cbfc_sel:2, - rx_cbfc_redir_en:1, - rx_cbfc_en:1, - tm_hd_mode:1; - }; - - u32 value; - }; -}; - -/* GE MAC, PCS reset control register */ -struct nps_enet_ge_rst { - union { - /* gmac_0: GE MAC reset - * spcs_0: SGMII PCS reset - */ - struct { - u32 - __reserved_1:23, - gmac_0:1, - __reserved_2:7, - spcs_0:1; - }; - - u32 value; - }; -}; - -/* Tx phase sync FIFO control register */ -struct nps_enet_phase_fifo_ctl { - union { - /* init: initialize serdes TX phase sync FIFO pointers - * rst: reset serdes TX phase sync FIFO - */ - struct { - u32 - __reserved:30, - init:1, - rst:1; - }; - - u32 value; - }; -}; +/* Tx control register masks and shifts */ +#define TX_CTL_NT_MASK 0x7FF +#define TX_CTL_NT_SHIFT 0 +#define TX_CTL_ET_MASK 0x4000 +#define TX_CTL_ET_SHIFT 14 +#define TX_CTL_CT_MASK 0x8000 +#define TX_CTL_CT_SHIFT 15 + +/* Rx control register masks and shifts */ +#define RX_CTL_NR_MASK 0x7FF +#define RX_CTL_NR_SHIFT 0 +#define RX_CTL_CRC_MASK 0x2000 +#define RX_CTL_CRC_SHIFT 13 +#define RX_CTL_ER_MASK 0x4000 +#define RX_CTL_ER_SHIFT 14 +#define RX_CTL_CR_MASK 0x8000 +#define RX_CTL_CR_SHIFT 15 + +/* Interrupt enable for data buffer events register masks and shifts */ +#define RX_RDY_MASK 0x1 +#define RX_RDY_SHIFT 0 +#define TX_DONE_MASK 0x2 +#define TX_DONE_SHIFT 1 + +/* Gbps Eth MAC Configuration 0 register masks and shifts */ +#define CFG_0_RX_EN_MASK 0x1 +#define CFG_0_RX_EN_SHIFT 0 +#define CFG_0_TX_EN_MASK 0x2 +#define CFG_0_TX_EN_SHIFT 1 +#define CFG_0_TX_FC_EN_MASK 0x4 +#define CFG_0_TX_FC_EN_SHIFT 2 +#define CFG_0_TX_PAD_EN_MASK 0x8 +#define CFG_0_TX_PAD_EN_SHIFT 3 +#define CFG_0_TX_CRC_EN_MASK 0x10 +#define CFG_0_TX_CRC_EN_SHIFT 4 +#define CFG_0_RX_FC_EN_MASK 0x20 +#define CFG_0_RX_FC_EN_SHIFT 5 +#define CFG_0_RX_CRC_STRIP_MASK 0x40 +#define CFG_0_RX_CRC_STRIP_SHIFT 6 +#define CFG_0_RX_CRC_IGNORE_MASK 0x80 +#define CFG_0_RX_CRC_IGNORE_SHIFT 7 +#define CFG_0_RX_LENGTH_CHECK_EN_MASK 0x100 +#define CFG_0_RX_LENGTH_CHECK_EN_SHIFT 8 +#define CFG_0_TX_FC_RETR_MASK 0xE00 +#define CFG_0_TX_FC_RETR_SHIFT 9 +#define CFG_0_RX_IFG_MASK 0xF000 +#define CFG_0_RX_IFG_SHIFT 12 +#define CFG_0_TX_IFG_MASK 0x3F0000 +#define CFG_0_TX_IFG_SHIFT 16 +#define CFG_0_RX_PR_CHECK_EN_MASK 0x400000 +#define CFG_0_RX_PR_CHECK_EN_SHIFT 22 +#define CFG_0_NIB_MODE_MASK 0x800000 +#define CFG_0_NIB_MODE_SHIFT 23 +#define CFG_0_TX_IFG_NIB_MASK 
0xF000000 +#define CFG_0_TX_IFG_NIB_SHIFT 24 +#define CFG_0_TX_PR_LEN_MASK 0xF0000000 +#define CFG_0_TX_PR_LEN_SHIFT 28 + +/* Gbps Eth MAC Configuration 1 register masks and shifts */ +#define CFG_1_OCTET_0_MASK 0x000000FF +#define CFG_1_OCTET_0_SHIFT 0 +#define CFG_1_OCTET_1_MASK 0x0000FF00 +#define CFG_1_OCTET_1_SHIFT 8 +#define CFG_1_OCTET_2_MASK 0x00FF0000 +#define CFG_1_OCTET_2_SHIFT 16 +#define CFG_1_OCTET_3_MASK 0xFF000000 +#define CFG_1_OCTET_3_SHIFT 24 + +/* Gbps Eth MAC Configuration 2 register masks and shifts */ +#define CFG_2_OCTET_4_MASK 0x000000FF +#define CFG_2_OCTET_4_SHIFT 0 +#define CFG_2_OCTET_5_MASK 0x0000FF00 +#define CFG_2_OCTET_5_SHIFT 8 +#define CFG_2_DISK_MC_MASK 0x00100000 +#define CFG_2_DISK_MC_SHIFT 20 +#define CFG_2_DISK_BC_MASK 0x00200000 +#define CFG_2_DISK_BC_SHIFT 21 +#define CFG_2_DISK_DA_MASK 0x00400000 +#define CFG_2_DISK_DA_SHIFT 22 +#define CFG_2_STAT_EN_MASK 0x3000000 +#define CFG_2_STAT_EN_SHIFT 24 +#define CFG_2_TRANSMIT_FLUSH_EN_MASK 0x80000000 +#define CFG_2_TRANSMIT_FLUSH_EN_SHIFT 31 + +/* Gbps Eth MAC Configuration 3 register masks and shifts */ +#define CFG_3_TM_HD_MODE_MASK 0x1 +#define CFG_3_TM_HD_MODE_SHIFT 0 +#define CFG_3_RX_CBFC_EN_MASK 0x2 +#define CFG_3_RX_CBFC_EN_SHIFT 1 +#define CFG_3_RX_CBFC_REDIR_EN_MASK 0x4 +#define CFG_3_RX_CBFC_REDIR_EN_SHIFT 2 +#define CFG_3_REDIRECT_CBFC_SEL_MASK 0x18 +#define CFG_3_REDIRECT_CBFC_SEL_SHIFT 3 +#define CFG_3_CF_DROP_MASK 0x20 +#define CFG_3_CF_DROP_SHIFT 5 +#define CFG_3_CF_TIMEOUT_MASK 0x3C0 +#define CFG_3_CF_TIMEOUT_SHIFT 6 +#define CFG_3_RX_IFG_TH_MASK 0x7C00 +#define CFG_3_RX_IFG_TH_SHIFT 10 +#define CFG_3_TX_CBFC_EN_MASK 0x8000 +#define CFG_3_TX_CBFC_EN_SHIFT 15 +#define CFG_3_MAX_LEN_MASK 0x3FFF0000 +#define CFG_3_MAX_LEN_SHIFT 16 +#define CFG_3_EXT_OOB_CBFC_SEL_MASK 0xC0000000 +#define CFG_3_EXT_OOB_CBFC_SEL_SHIFT 30 + +/* GE MAC, PCS reset control register masks and shifts */ +#define RST_SPCS_MASK 0x1 +#define RST_SPCS_SHIFT 0 +#define RST_GMAC_0_MASK 0x100 +#define RST_GMAC_0_SHIFT 8 + +/* Tx phase sync FIFO control register masks and shifts */ +#define PHASE_FIFO_CTL_RST_MASK 0x1 +#define PHASE_FIFO_CTL_RST_SHIFT 0 +#define PHASE_FIFO_CTL_INIT_MASK 0x2 +#define PHASE_FIFO_CTL_INIT_SHIFT 1 /** * struct nps_enet_priv - Storage of ENET's private information. @@ -285,8 +175,8 @@ struct nps_enet_priv { bool tx_packet_sent; struct sk_buff *tx_skb; struct napi_struct napi; - struct nps_enet_ge_mac_cfg_2 ge_mac_cfg_2; - struct nps_enet_ge_mac_cfg_3 ge_mac_cfg_3; + u32 ge_mac_cfg_2_value; + u32 ge_mac_cfg_3_value; }; /** -- cgit v1.2.3 From 28466e0f2463a1084781c9312cd9148b2530eea7 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 4 Mar 2016 11:52:17 +0100 Subject: cxgb4i: don't redefine DIV_ROUND_UP let's use the common definition to avoid the following warning during the compilation: drivers/scsi/cxgbi/cxgb4i/cxgb4i.c:161:0: warning: "DIV_ROUND_UP" redefined #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) ^ In file included from include/linux/list.h:8:0, from include/linux/module.h:9, from drivers/scsi/cxgbi/cxgb4i/cxgb4i.c:16: include/linux/kernel.h:67:0: note: this is the location of the previous definition #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP ^ Signed-off-by: Nicolas Dichtel Signed-off-by: David S. 
Miller --- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 804806e1cbb4..339f6b7f4803 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -13,6 +13,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ +#include #include #include #include @@ -158,7 +159,6 @@ static struct scsi_transport_template *cxgb4i_stt; * open/close/abort and data send/receive. */ -#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #define RCV_BUFSIZ_MASK 0x3FFU #define MAX_IMM_TX_PKT_LEN 256 -- cgit v1.2.3 From 166cc7136752192bb6e1f2be2e56614762292a00 Mon Sep 17 00:00:00 2001 From: Nicolas Dichtel Date: Fri, 4 Mar 2016 11:52:18 +0100 Subject: drm/vmwgfx: remove userland definition of DIV_ROUND_UP Let's use __KERNEL_DIV_ROUND_UP, which is defined in uapi/linux/kernel.h. Signed-off-by: Nicolas Dichtel Signed-off-by: David S. Miller --- .../drm/vmwgfx/device_include/svga3d_surfacedefs.h | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h index 58704f0a4607..531d22025fec 100644 --- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h +++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h @@ -25,6 +25,8 @@ * **************************************************************************/ +#include + #ifdef __KERNEL__ #include @@ -36,7 +38,6 @@ #define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0])) #endif /* ARRAY_SIZE */ -#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y)) #define max_t(type, x, y) ((x) > (y) ? (x) : (y)) #define surf_size_struct SVGA3dSize #define u32 uint32 @@ -987,12 +988,12 @@ svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc, const surf_size_struct *pixel_size, surf_size_struct *block_size) { - block_size->width = DIV_ROUND_UP(pixel_size->width, - desc->block_size.width); - block_size->height = DIV_ROUND_UP(pixel_size->height, - desc->block_size.height); - block_size->depth = DIV_ROUND_UP(pixel_size->depth, - desc->block_size.depth); + block_size->width = __KERNEL_DIV_ROUND_UP(pixel_size->width, + desc->block_size.width); + block_size->height = __KERNEL_DIV_ROUND_UP(pixel_size->height, + desc->block_size.height); + block_size->depth = __KERNEL_DIV_ROUND_UP(pixel_size->depth, + desc->block_size.depth); } static inline bool @@ -1100,8 +1101,9 @@ svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format, const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format); const u32 bw = desc->block_size.width, bh = desc->block_size.height; const u32 bd = desc->block_size.depth; - const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block; - const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride; + const u32 rowstride = __KERNEL_DIV_ROUND_UP(width, bw) * + desc->bytes_per_block; + const u32 imgstride = __KERNEL_DIV_ROUND_UP(height, bh) * rowstride; const u32 offset = (z / bd * imgstride + y / bh * rowstride + x / bw * desc->bytes_per_block); -- cgit v1.2.3 From 6297b91c7fb530d3f2543a8e25d68ccf75bfb139 Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Thu, 3 Mar 2016 01:16:54 +0000 Subject: vxlan: use reset to set header pointers Since offset is zero, it's not necessary to use set function. 
Reset function is straightforward, and will remove the unnecessary add operation in set function. Signed-off-by: Zhang Shengju Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 775ddb48388d..8ca243d93b78 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1460,7 +1460,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, reply->dev = dev; skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); skb_push(reply, sizeof(struct ethhdr)); - skb_set_mac_header(reply, 0); + skb_reset_mac_header(reply); ns = (struct nd_msg *)skb_transport_header(request); @@ -1480,7 +1480,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, reply->protocol = htons(ETH_P_IPV6); skb_pull(reply, sizeof(struct ethhdr)); - skb_set_network_header(reply, 0); + skb_reset_network_header(reply); skb_put(reply, sizeof(struct ipv6hdr)); /* IPv6 header */ @@ -1495,7 +1495,7 @@ static struct sk_buff *vxlan_na_create(struct sk_buff *request, pip6->saddr = *(struct in6_addr *)n->primary_key; skb_pull(reply, sizeof(struct ipv6hdr)); - skb_set_transport_header(reply, 0); + skb_reset_transport_header(reply); na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); -- cgit v1.2.3 From 6b163a85478b27748bad219fe3ead7ac87217265 Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Thu, 3 Mar 2016 01:16:55 +0000 Subject: mac80211_hwsim: use reset to set mac header Since offset is zero, it's not necessary to use set function. Reset function is straightforward, and will remove the unnecessary add operation in set function. Signed-off-by: Zhang Shengju Signed-off-by: David S. Miller --- drivers/net/wireless/mac80211_hwsim.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a723a85f5635..e85e0737771c 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -844,7 +844,7 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, hdr->rt_chbitmask = cpu_to_le16(flags); skb->dev = hwsim_mon; - skb_set_mac_header(skb, 0); + skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); @@ -887,7 +887,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, memcpy(hdr11->addr1, addr, ETH_ALEN); skb->dev = hwsim_mon; - skb_set_mac_header(skb, 0); + skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); -- cgit v1.2.3 From 4479a899f75cdbd0406d29701d459be5cadaf00b Mon Sep 17 00:00:00 2001 From: Luca Coelho Date: Mon, 8 Feb 2016 11:18:46 +0200 Subject: iwlwifi: pcie: forbid RTPM on device removal The pci driver keeps any unbound device in active state and forbids runtime PM. When our driver gets probed, we take control of the state. When the device is released (i.e. during unbind or module removal), we should return the state to what it was before. To do so, we need to forbid RTPM in the driver remove op. Additionally, remove an unnecessary pm_runtime_disable() call, move the initial ref_count setting to a better place and add some comments explaining what is going on. 
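As a rough illustration of the probe/remove pairing described above (variable names such as use_runtime_pm and delay_ms are placeholders, not the driver's exact code):

    /* probe: take over the runtime-PM state from the PCI core */
    if (use_runtime_pm) {
            pm_runtime_set_active(&pdev->dev);      /* clear stale error state */
            pm_runtime_set_autosuspend_delay(&pdev->dev, delay_ms);
            pm_runtime_use_autosuspend(&pdev->dev);
            /* normally userspace enables this via sysfs; done here for now */
            pm_runtime_allow(&pdev->dev);
    }

    /* remove: return the state to what it was before probe */
    if (use_runtime_pm)
            pm_runtime_forbid(&pdev->dev);          /* undoes the allow above */

Both fragments assume pdev and the mode check from the surrounding driver context.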
Signed-off-by: Luca Coelho Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 28 +++++++++++++++++++++++++ drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 6 ------ 2 files changed, 28 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index d33b6baf5f98..05b968506836 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -631,13 +631,31 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* if RTPM is in use, enable it in our device */ if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { + /* We explicitly set the device to active here to + * clear contingent errors. + */ pm_runtime_set_active(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, iwlwifi_mod_params.d0i3_entry_delay); pm_runtime_use_autosuspend(&pdev->dev); + + /* We are not supposed to call pm_runtime_allow() by + * ourselves, but let userspace enable runtime PM via + * sysfs. However, since we don't enable this from + * userspace yet, we need to allow/forbid() ourselves. + */ pm_runtime_allow(&pdev->dev); } + /* The PCI device starts with a reference taken and we are + * supposed to release it here. But to simplify the + * interaction with the opmode, we don't do it now, but let + * the opmode release it when it's ready. To account for this + * reference, we start with ref_count set to 1. + */ + trans_pcie->ref_count = 1; + return 0; out_free_drv: @@ -652,7 +670,17 @@ static void iwl_pci_remove(struct pci_dev *pdev) struct iwl_trans *trans = pci_get_drvdata(pdev); struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + /* if RTPM was in use, restore it to the state before probe */ + if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) { + /* We should not call forbid here, but we do for now. + * Check the comment to pm_runtime_allow() in + * iwl_pci_probe(). + */ + pm_runtime_forbid(trans->dev); + } + iwl_drv_stop(trans_pcie->drv); + iwl_trans_pcie_free(trans); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index e67957d6ac79..eb39c7e09781 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1646,9 +1646,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans, trans->command_groups = trans_cfg->command_groups; trans->command_groups_size = trans_cfg->command_groups_size; - /* init ref_count to 1 (should be cleared when ucode is loaded) */ - trans_pcie->ref_count = 1; - /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. * As this function may be called again in some corner cases don't @@ -1663,9 +1660,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - /* TODO: check if this is really needed */ - pm_runtime_disable(trans->dev); - iwl_pcie_synchronize_irqs(trans); iwl_pcie_tx_free(trans); -- cgit v1.2.3 From d56daea43c5a5d72e67ac60d97734d5823b457d6 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 15 Feb 2016 19:30:49 +0200 Subject: iwlwifi: pcie: refactor RXBs reclaiming code Change the code to move rxbs directly from the allocator's list to the queue's free list. This makes the code more readable, saves the interim array and the double loop over the free RBs. 
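The core of the change is replacing the copy-out array with a direct splice between the two lists; a minimal sketch of the pattern, with illustrative names rather than the driver's own:

    struct item {
            struct list_head list;
    };

    /* move n entries from the allocator's list to the queue's free list */
    spin_lock(&alloc_lock);
    for (i = 0; i < n; i++) {
            struct item *it = list_first_entry(&allocated_list,
                                               struct item, list);

            /* unlink and re-link in one step - no temporary array needed */
            list_move(&it->list, &free_list);
    }
    spin_unlock(&alloc_lock);

    used_count -= n;
    free_count += n;

list_move() removes the entry from whatever list it is currently on and adds it to the destination, which is why the interim out[] array and the second loop can go away.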
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 75 ++++++++++++---------------- 1 file changed, 33 insertions(+), 42 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 489b07a9e471..a310fd265e24 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -539,40 +539,46 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans) } /* - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages + * iwl_pcie_rx_allocator_get - returns the pre-allocated pages .* .* Called by queue when the queue posted allocation request and * has freed 8 RBDs in order to restock itself. + * This function directly moves the allocated RBs to the queue's ownership + * and updates the relevant counters. */ -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, - struct iwl_rx_mem_buffer - *out[RX_CLAIM_REQ_ALLOC]) +static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; int i; + lockdep_assert_held(&rxq->lock); + /* * atomic_dec_if_positive returns req_ready - 1 for any scenario. * If req_ready is 0 atomic_dec_if_positive will return -1 and this - * function will return -ENOMEM, as there are no ready requests. + * function will return early, as there are no ready requests. * atomic_dec_if_positive will perofrm the *actual* decrement only if * req_ready > 0, i.e. - there are ready requests and the function * hands one request to the caller. */ if (atomic_dec_if_positive(&rba->req_ready) < 0) - return -ENOMEM; + return; spin_lock(&rba->lock); for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { /* Get next free Rx buffer, remove it from free list */ - out[i] = list_first_entry(&rba->rbd_allocated, - struct iwl_rx_mem_buffer, list); - list_del(&out[i]->list); + struct iwl_rx_mem_buffer *rxb = + list_first_entry(&rba->rbd_allocated, + struct iwl_rx_mem_buffer, list); + + list_move(&rxb->list, &rxq->rx_free); } spin_unlock(&rba->lock); - return 0; + rxq->used_count -= RX_CLAIM_REQ_ALLOC; + rxq->free_count += RX_CLAIM_REQ_ALLOC; } static void iwl_pcie_rx_allocator_work(struct work_struct *data) @@ -1149,7 +1155,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq[queue]; - u32 r, i, j, count = 0; + u32 r, i, count = 0; bool emergency = false; restart: @@ -1193,39 +1199,24 @@ restart: i = (i + 1) & (rxq->queue_size - 1); - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - - * try to claim the pre-allocated buffers from the allocator */ - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { + /* + * If we have RX_CLAIM_REQ_ALLOC released rx buffers - + * try to claim the pre-allocated buffers from the allocator. + * If not ready - will try to reclaim next time. 
+ * There is no need to reschedule work - allocator exits only + * on success + */ + if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) + iwl_pcie_rx_allocator_get(trans, rxq); + + if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) { struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; - - if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && - !emergency) { - /* Add the remaining 6 empty RBDs - * for allocator use - */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, - &rba->rbd_empty); - spin_unlock(&rba->lock); - } - /* If not ready - continue, will try to reclaim later. - * No need to reschedule work - allocator exits only on - * success */ - if (!iwl_pcie_rx_allocator_get(trans, out)) { - /* If success - then RX_CLAIM_REQ_ALLOC - * buffers were retrieved and should be added - * to free list */ - rxq->used_count -= RX_CLAIM_REQ_ALLOC; - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { - list_add_tail(&out[j]->list, - &rxq->rx_free); - rxq->free_count++; - } - } - } - if (emergency) { + /* Add the remaining empty RBDs for allocator use */ + spin_lock(&rba->lock); + list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); + spin_unlock(&rba->lock); + } else if (emergency) { count++; if (count == 8) { count = 0; -- cgit v1.2.3 From e5f91d91ac2e09f93f58ab8f6813d12f2b3afa03 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Wed, 2 Mar 2016 15:17:28 +0200 Subject: iwlwifi: pcie: set RB chunk size back to 64 128 byte chunk size is supported only on PCIe and not on IOSF. For now, change it back to 64 byte. Reported-by: Oren Givon Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a310fd265e24..509f79003241 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -801,11 +801,10 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) /* * Activate DMA snooping. - * Set RX DMA chunk size to 128 bit + * Set RX DMA chunk size to 64B * Default queue is 0 */ iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP | - RFH_GEN_CFG_RB_CHUNK_SIZE | (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) | RFH_GEN_CFG_SERVICE_DMA_SNOOP); /* Enable the relevant rx queues */ -- cgit v1.2.3 From 91f66a3c672065a3aca387ca7e0028504bb8f457 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Sun, 28 Feb 2016 10:15:08 +0200 Subject: iwlwifi: mvm: avoid panics with thermal device usage Thermal zone device registration can fail, and in this case we don't want to remove WiFi functionality. This is why the thermal zone registration function is void, and the flows continue even if the thermal zone device registration failed. Same applies for the cooling device. This means that we at least need to remember that the thermal zone device didn't register properly and take the minimal precautions to avoid panic'ing when we access it. This was missing. 
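The defensive pattern boils down to recording a failed registration as a NULL pointer and checking it before every later use; a condensed sketch, with the structure and field names abridged from the driver:

    tz->tzone = thermal_zone_device_register(name, trips, mask, tz,
                                             &ops, NULL, 0, 0);
    if (IS_ERR(tz->tzone)) {
            pr_debug("failed to register thermal zone (%ld)\n",
                     PTR_ERR(tz->tzone));
            tz->tzone = NULL;       /* remember that registration failed */
            return;
    }

    /* ...and everywhere the zone is used later: */
    if (!tz->tzone)
            return -EINVAL;         /* registration failed earlier */

The same NULL-on-failure / check-before-use treatment is applied to the cooling device.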
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 - drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 57 +++++++++++++++------------- 2 files changed, 31 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index ab410b4659f3..b869db9afc52 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1575,7 +1575,6 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state); int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp); void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm); -int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm); int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget); /* Location Aware Regulatory */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 999bcb898be8..0a02e9835d6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -211,10 +211,14 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) * the firmware and hence to take the mutex. * Avoid the deadlock by unlocking the mutex here. */ - mutex_unlock(&mvm->mutex); - thermal_notify_framework(mvm->tz_device.tzone, - mvm->tz_device.fw_trips_index[ths_crossed]); - mutex_lock(&mvm->mutex); + if (mvm->tz_device.tzone) { + struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device; + + mutex_unlock(&mvm->mutex); + thermal_notify_framework(tz_dev->tzone, + tz_dev->fw_trips_index[ths_crossed]); + mutex_lock(&mvm->mutex); + } #endif /* CONFIG_THERMAL */ } @@ -520,16 +524,20 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); + if (!mvm->tz_device.tzone) + return -EINVAL; + /* The driver holds array of temperature trips that are unsorted * and uncompressed, the FW should get it compressed and sorted */ /* compress temp_trips to cmd array, remove uninitialized values*/ - for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) + for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) { if (mvm->tz_device.temp_trips[i] != S16_MIN) { cmd.thresholds[idx++] = cpu_to_le16(mvm->tz_device.temp_trips[i]); } + } cmd.num_temps = cpu_to_le32(idx); if (!idx) @@ -696,6 +704,7 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) IWL_DEBUG_TEMP(mvm, "Failed to register to thermal zone (err = %ld)\n", PTR_ERR(mvm->tz_device.tzone)); + mvm->tz_device.tzone = NULL; return; } @@ -750,6 +759,10 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget) return ret; } + /* can happen if the registration failed */ + if (!mvm->cooling_dev.cdev) + return -EINVAL; + if (op == CTDP_CMD_OPERATION_START) mvm->cooling_dev.cur_state = budget; @@ -812,15 +825,12 @@ static struct thermal_cooling_device_ops tcooling_ops = { .set_cur_state = iwl_mvm_tcool_set_cur_state, }; -int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) +static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) { char name[] = "iwlwifi"; - if (!iwl_mvm_is_ctdp_supported(mvm)) { - mvm->cooling_dev.cdev = NULL; - - return 0; - } + if (!iwl_mvm_is_ctdp_supported(mvm)) + return; BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH); @@ -833,34 +843,29 @@ int iwl_mvm_cooling_device_register(struct iwl_mvm *mvm) IWL_DEBUG_TEMP(mvm, "Failed to register to cooling device (err = %ld)\n", PTR_ERR(mvm->cooling_dev.cdev)); - 
return PTR_ERR(mvm->cooling_dev.cdev); + mvm->cooling_dev.cdev = NULL; + return; } - - return 0; } static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm) { - if (!iwl_mvm_is_tt_in_fw(mvm)) + if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone) return; - if (mvm->tz_device.tzone) { - IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); - thermal_zone_device_unregister(mvm->tz_device.tzone); - mvm->tz_device.tzone = NULL; - } + IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n"); + thermal_zone_device_unregister(mvm->tz_device.tzone); + mvm->tz_device.tzone = NULL; } static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm) { - if (!iwl_mvm_is_ctdp_supported(mvm)) + if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev) return; - if (mvm->cooling_dev.cdev) { - IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); - thermal_cooling_device_unregister(mvm->cooling_dev.cdev); - mvm->cooling_dev.cdev = NULL; - } + IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n"); + thermal_cooling_device_unregister(mvm->cooling_dev.cdev); + mvm->cooling_dev.cdev = NULL; } #endif /* CONFIG_THERMAL */ -- cgit v1.2.3 From 4c3e962df044c34d25403f8f58ff7b34c4966b4a Mon Sep 17 00:00:00 2001 From: Wu Fengguang Date: Thu, 3 Mar 2016 09:55:19 +0800 Subject: stmmac: fix noderef.cocci warnings drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c:115:15-21: ERROR: application of sizeof to pointer sizeof when applied to a pointer typed expression gives the size of the pointer Generated by: scripts/coccinelle/misc/noderef.cocci CC: Giuseppe Cavallaro Signed-off-by: Fengguang Wu Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 69ccf486d4fa..9cf181f839fd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -112,7 +112,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev) if (!np) return NULL; - axi = kzalloc(sizeof(axi), GFP_KERNEL); + axi = kzalloc(sizeof(*axi), GFP_KERNEL); if (!axi) return ERR_PTR(-ENOMEM); -- cgit v1.2.3 From 962d8cdc3133435aed2928637f73e272128a326c Mon Sep 17 00:00:00 2001 From: Bernhard Walle Date: Thu, 3 Mar 2016 10:15:55 +0100 Subject: net: fec: Rename "phy-reset-active-low" property is actually "active high". Thanks for Troy Kisky for pointing that out. Since the patch is in linux-next, this patch is incremental and doesn't replace the original patch. Signed-off-by: Bernhard Walle Acked-by: Fugang Duan Signed-off-by: David S. Miller --- Documentation/devicetree/bindings/net/fsl-fec.txt | 2 +- drivers/net/ethernet/freescale/fec_main.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt index a4799fff0d1f..b037a9d78d93 100644 --- a/Documentation/devicetree/bindings/net/fsl-fec.txt +++ b/Documentation/devicetree/bindings/net/fsl-fec.txt @@ -12,7 +12,7 @@ Optional properties: only if property "phy-reset-gpios" is available. Missing the property will have the duration be 1 millisecond. Numbers greater than 1000 are invalid and 1 millisecond will be used instead. 
-- phy-reset-active-low : If present then the reset sequence using the GPIO +- phy-reset-active-high : If present then the reset sequence using the GPIO specified in the "phy-reset-gpios" property is reversed (H=reset state, L=operation state). - phy-supply : regulator that powers the Ethernet PHY. diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index bad0ba29a94a..37c081583084 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3191,7 +3191,7 @@ static int fec_enet_init(struct net_device *ndev) static void fec_reset_phy(struct platform_device *pdev) { int err, phy_reset; - bool active_low = false; + bool active_high = false; int msec = 1; struct device_node *np = pdev->dev.of_node; @@ -3207,17 +3207,17 @@ static void fec_reset_phy(struct platform_device *pdev) if (!gpio_is_valid(phy_reset)) return; - active_low = of_property_read_bool(np, "phy-reset-active-low"); + active_high = of_property_read_bool(np, "phy-reset-active-high"); err = devm_gpio_request_one(&pdev->dev, phy_reset, - active_low ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, + active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, "phy-reset"); if (err) { dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); return; } msleep(msec); - gpio_set_value_cansleep(phy_reset, !active_low); + gpio_set_value_cansleep(phy_reset, !active_high); } #else /* CONFIG_OF */ static void fec_reset_phy(struct platform_device *pdev) -- cgit v1.2.3 From afd5b1704cb15eabc4358b7b0dd7f33099408eaa Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 1 Mar 2016 12:18:22 +0200 Subject: iwlwifi: refactor the code that reads the MAC address from the NVM It makes it slightly easier to follow. Pass the pointer to the transport which allows to read WFMP_MAC_ADDR_X register only when needed and to use IWL_ERR instead of the less commonly used IWL_ERR_DEV logger macro. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 97 ++++++++++++---------- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h | 5 +- drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | 11 +-- 3 files changed, 55 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 348135792f3e..5e6b90da3179 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -69,6 +70,7 @@ #include "iwl-drv.h" #include "iwl-modparams.h" #include "iwl-nvm-parse.h" +#include "iwl-prph.h" /* NVM offsets (in words) definitions */ enum wkp_nvm_offsets { @@ -522,27 +524,11 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, data->valid_rx_ant = NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg); } -static void iwl_set_hw_address(const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, - const __le16 *nvm_sec) -{ - const u8 *hw_addr = (const u8 *)(nvm_sec + HW_ADDR); - - /* The byte order is little endian 16 bit, meaning 214365 */ - data->hw_addr[0] = hw_addr[1]; - data->hw_addr[1] = hw_addr[0]; - data->hw_addr[2] = hw_addr[3]; - data->hw_addr[3] = hw_addr[2]; - data->hw_addr[4] = hw_addr[5]; - data->hw_addr[5] = hw_addr[4]; -} - -static void iwl_set_hw_address_family_8000(struct device *dev, +static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *mac_override, - const __le16 *nvm_hw, - __le32 mac_addr0, __le32 mac_addr1) + const __le16 *nvm_hw) { const u8 *hw_addr; @@ -568,11 +554,16 @@ static void iwl_set_hw_address_family_8000(struct device *dev, memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0) return; - IWL_ERR_DEV(dev, - "mac address from nvm override section is not valid\n"); + IWL_ERR(trans, + "mac address from nvm override section is not valid\n"); } if (nvm_hw) { + /* read the mac address from WFMP registers */ + __le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans, + WFMP_MAC_ADDR_0)); + __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, + WFMP_MAC_ADDR_1)); /* read the MAC address from HW resisters */ hw_addr = (const u8 *)&mac_addr0; data->hw_addr[0] = hw_addr[3]; @@ -585,28 +576,50 @@ static void iwl_set_hw_address_family_8000(struct device *dev, data->hw_addr[5] = hw_addr[0]; if (!is_valid_ether_addr(data->hw_addr)) - IWL_ERR_DEV(dev, - "mac address (%pM) from hw section is not valid\n", - data->hw_addr); + IWL_ERR(trans, + "mac address (%pM) from hw section is not valid\n", + data->hw_addr); return; } - IWL_ERR_DEV(dev, "mac address is not found\n"); + IWL_ERR(trans, "mac address is not found\n"); +} + +static void iwl_set_hw_address(struct iwl_trans *trans, + const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_hw, + const __le16 *mac_override) +{ + if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); + + /* The byte order is little endian 16 bit, meaning 214365 */ + data->hw_addr[0] = hw_addr[1]; + data->hw_addr[1] = hw_addr[0]; + data->hw_addr[2] = hw_addr[3]; + data->hw_addr[3] = hw_addr[2]; + data->hw_addr[4] = hw_addr[5]; + data->hw_addr[5] = hw_addr[4]; + } else { + iwl_set_hw_address_family_8000(trans, cfg, data, + mac_override, nvm_hw); + } } struct iwl_nvm_data * -iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, +iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - __le32 mac_addr0, __le32 mac_addr1) + u8 tx_chains, u8 rx_chains, bool lar_fw_supported) { + struct device *dev = trans->dev; struct 
iwl_nvm_data *data; - u32 sku; - u32 radio_cfg; + bool lar_enabled; + u32 sku, radio_cfg; u16 lar_config; + const __le16 *ch_section; if (cfg->device_family != IWL_DEVICE_FAMILY_8000) data = kzalloc(sizeof(*data) + @@ -645,21 +658,16 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { /* Checking for required sections */ if (!nvm_calib) { - IWL_ERR_DEV(dev, - "Can't parse empty Calib NVM sections\n"); + IWL_ERR(trans, + "Can't parse empty Calib NVM sections\n"); kfree(data); return NULL; } /* in family 8000 Xtal calibration values moved to OTP */ data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); - } - - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { - iwl_set_hw_address(cfg, data, nvm_hw); - - iwl_init_sbands(dev, cfg, data, nvm_sw, - tx_chains, rx_chains, lar_fw_supported); + lar_enabled = true; + ch_section = nvm_sw; } else { u16 lar_offset = data->nvm_version < 0xE39 ? NVM_LAR_OFFSET_FAMILY_8000_OLD : @@ -668,16 +676,13 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, lar_config = le16_to_cpup(regulatory + lar_offset); data->lar_enabled = !!(lar_config & NVM_LAR_ENABLED_FAMILY_8000); - - /* MAC address in family 8000 */ - iwl_set_hw_address_family_8000(dev, cfg, data, mac_override, - nvm_hw, mac_addr0, mac_addr1); - - iwl_init_sbands(dev, cfg, data, regulatory, - tx_chains, rx_chains, - lar_fw_supported && data->lar_enabled); + lar_enabled = data->lar_enabled; + ch_section = regulatory; } + iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override); + iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, + lar_fw_supported && lar_enabled); data->calib_version = 255; return data; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index 4e8e0dc474d4..d704d52aa7ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -74,12 +74,11 @@ * later with iwl_free_nvm_data(). 
*/ struct iwl_nvm_data * -iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg, +iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const __le16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported, - __le32 mac_addr0, __le32 mac_addr1); + u8 tx_chains, u8 rx_chains, bool lar_fw_supported); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index c446e0da9789..25a98401a64f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -300,7 +300,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku; bool lar_enabled; - __le32 mac_addr0, mac_addr1; /* Checking for required sections */ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) { @@ -336,12 +335,6 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) if (WARN_ON(!mvm->cfg)) return NULL; - /* read the mac address from WFMP registers */ - mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, - WFMP_MAC_ADDR_0)); - mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans, - WFMP_MAC_ADDR_1)); - hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data; sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; @@ -354,10 +347,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib, + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, regulatory, mac_override, phy_sku, mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled, mac_addr0, mac_addr1); + lar_enabled); } #define MAX_NVM_FILE_LEN 16384 -- cgit v1.2.3 From 04ddc2aacc1c52793c1e30db5073bef6fbc08942 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Thu, 3 Mar 2016 13:31:39 +0200 Subject: iwlwifi: mvm: fix unregistration of thermal in some error flows The call to iwl_mvm_thermal_initialize() was too early in the function. Unregister will be performed when goto out_unregister is called, but as the code was - out_free may be called and leave without unregistering from thermal. 
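The underlying rule is the usual goto-unwind ordering in kernel start/probe paths: a resource has to be set up after the last error exit that does not tear it down, so that every later failure path unwinds it. A condensed sketch of the shape (labels and helpers are placeholders):

    err = early_setup();
    if (err)
            goto out_free;          /* thermal not registered yet */

    thermal_register();             /* moved down: register only here... */

    err = late_setup();
    if (err)
            goto out_unregister;    /* ...so all later errors undo it */

    return 0;

 out_unregister:
    thermal_unregister();
 out_free:
    free_resources();
    return err;

Registering before early_setup() would leave the thermal zone registered on the "goto out_free" path, which is exactly the situation this patch fixes.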
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 699a80863e86..4322e8ed1159 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -611,9 +611,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, IWL_INFO(mvm, "Detected %s, REV=0x%X\n", mvm->cfg->name, mvm->trans->hw_rev); - min_backoff = calc_min_backoff(trans, cfg); - iwl_mvm_thermal_initialize(mvm, min_backoff); - if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; else @@ -666,6 +663,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (err) goto out_free; + min_backoff = calc_min_backoff(trans, cfg); + iwl_mvm_thermal_initialize(mvm, min_backoff); + err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir); if (err) goto out_unregister; -- cgit v1.2.3 From 30d915c2ecbb60d222f49e8f249d04e208d11ebc Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Thu, 3 Mar 2016 16:49:05 +0200 Subject: iwlwifi: mvm: set the correct amsdu enum values The amsdu enum values are off by 1 bit. Fix it. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h index eb9b87038e1f..7a16e55df012 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h @@ -264,9 +264,8 @@ enum iwl_rx_mpdu_mac_flags2 { }; enum iwl_rx_mpdu_amsdu_info { - IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x3f, - IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x40, - /* 0x80 bit reserved for now */ + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK = 0x7f, + IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80, }; enum iwl_rx_l3l4_flags { -- cgit v1.2.3 From 7c70fee5ae5c4fb542e432599cb85c8031b952a0 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Tue, 2 Feb 2016 11:55:53 +0200 Subject: iwlwifi: mvm: extend time event duration Before authentication, we start a time event during which we wait for a beacon in order to sync our timers. If we didn't hear the beacon during this time - we abandon the connection. However, in congested environment, it was observed we might not hear beacons in that time slot. Extend the time event to give the connection a better chance. 
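To put numbers on it (constants taken from this patch, with a typical beacon interval value of 100 assumed for illustration):

    /* old: capped, and scaled with the beacon interval */
    duration     = min(500, 200 + beacon_int);  /* -> 300 */
    min_duration = min(400, 100 + beacon_int);  /* -> 200 */

    /* new: always request the full protection window */
    duration     = 600;  /* IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS */
    min_duration = 400;  /* IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS */

With that assumed beacon interval, the firmware is asked to protect the session roughly twice as long as before, giving the beacon a better chance of being heard.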
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 ++---- drivers/net/wireless/intel/iwlwifi/mvm/time-event.h | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index ec6b07282e7d..3590835a308f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2556,10 +2556,8 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - u32 duration = min(IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS, - 200 + vif->bss_conf.beacon_int); - u32 min_duration = min(IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS, - 100 + vif->bss_conf.beacon_int); + u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; + u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; if (WARN_ON_ONCE(vif->bss_conf.assoc)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h index 99d9a35ad5b1..3d2e8b6159bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h @@ -115,7 +115,7 @@ * needed by the driver. */ -#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500 +#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600 #define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400 /** -- cgit v1.2.3 From 7b5acd11a3ffec027abf4df2b30690a974873174 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 11 Feb 2016 10:56:22 -0600 Subject: rtlwifi: rtl8821ae: Silence useless log output When driver rtl8821ae is loaded but not connected to any AP, it logs a "firmware not ready to run" message roughly once a minute. To eliminate logging this massage under normal debug conditions, the degug level needed to print this message is increased. Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c index 525eb234627c..a4fc70e8c9c0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c @@ -271,7 +271,7 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw) err = _rtl8821ae_fw_free_to_go(hw); if (err) { - RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG, "Firmware is not ready to run!\n"); } else { RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, -- cgit v1.2.3 From 9cc3fdc86c6006bae878da6d2d14c1633064a12f Mon Sep 17 00:00:00 2001 From: One Thousand Gnomes Date: Mon, 15 Feb 2016 19:04:56 +0000 Subject: rt2x00: unterminated strlen of user data The buffer needs to be zero terminated in case the user data is not. Otherwise we run off the end of the buffer. 
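The general pattern for taking a short string from userspace is to size the kernel buffer one byte larger than the longest accepted input and to force a terminator before calling strlen()/simple_strtoul(); a minimal sketch of that pattern (the surrounding length check and variables are assumed from context):

    char line[17];                          /* 16 chars of input + '\0' */

    if (length > sizeof(line) - 1)
            return -EINVAL;

    if (copy_from_user(line, buf, length))
            return -EFAULT;

    line[length] = '\0';        /* never rely on userspace to terminate */

    size  = strlen(line);
    value = simple_strtoul(line, NULL, 0);

The patch itself enlarges the buffer by one byte and writes the terminator unconditionally into line[16], which achieves the same guarantee.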
Signed-off-by: Alan Cox Reviewed-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2x00debug.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c index 25ee3cb8e982..72ae530e4a3b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00debug.c @@ -478,7 +478,7 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \ { \ struct rt2x00debug_intf *intf = file->private_data; \ const struct rt2x00debug *debug = intf->debug; \ - char line[16]; \ + char line[17]; \ size_t size; \ unsigned int index = intf->offset_##__name; \ __type value; \ @@ -494,7 +494,8 @@ static ssize_t rt2x00debug_write_##__name(struct file *file, \ \ if (copy_from_user(line, buf, length)) \ return -EFAULT; \ - \ + line[16] = 0; \ + \ size = strlen(line); \ value = simple_strtoul(line, NULL, 0); \ \ -- cgit v1.2.3 From 2f10e50e3d284ba2a4e787bfe19e89770f87a8b3 Mon Sep 17 00:00:00 2001 From: Christian Lamparter Date: Mon, 15 Feb 2016 21:16:06 +0100 Subject: carl9170: import 1.9.9 firmware headers Import new headers from my firmware branch: Signed-off-by: Christian Lamparter Signed-off-by: Kalle Valo --- drivers/net/wireless/ath/carl9170/fwcmd.h | 8 ++++ drivers/net/wireless/ath/carl9170/fwdesc.h | 6 +++ drivers/net/wireless/ath/carl9170/hw.h | 73 +++++++++++++++++++++++++++-- drivers/net/wireless/ath/carl9170/version.h | 6 +-- 4 files changed, 87 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h index 9111d4ffc1b3..ea1d80f9a50e 100644 --- a/drivers/net/wireless/ath/carl9170/fwcmd.h +++ b/drivers/net/wireless/ath/carl9170/fwcmd.h @@ -56,6 +56,7 @@ enum carl9170_cmd_oids { CARL9170_CMD_RX_FILTER = 0x07, CARL9170_CMD_WOL = 0x08, CARL9170_CMD_TALLY = 0x09, + CARL9170_CMD_WREGB = 0x0a, /* CAM */ CARL9170_CMD_EKEY = 0x10, @@ -123,6 +124,12 @@ struct carl9170_write_reg { } regs[0] __packed; } __packed; +struct carl9170_write_reg_byte { + __le32 addr; + __le32 count; + u8 val[0]; +} __packed; + #define CARL9170FW_PHY_HT_ENABLE 0x4 #define CARL9170FW_PHY_HT_DYN2040 0x8 #define CARL9170FW_PHY_HT_EXT_CHAN_OFF 0x3 @@ -226,6 +233,7 @@ struct carl9170_cmd { struct carl9170_u32_list echo; struct carl9170_reg_list rreg; struct carl9170_write_reg wreg; + struct carl9170_write_reg_byte wregb; struct carl9170_rf_init rf_init; struct carl9170_psm psm; struct carl9170_wol_cmd wol; diff --git a/drivers/net/wireless/ath/carl9170/fwdesc.h b/drivers/net/wireless/ath/carl9170/fwdesc.h index 66848d47c88e..0533f79cb998 100644 --- a/drivers/net/wireless/ath/carl9170/fwdesc.h +++ b/drivers/net/wireless/ath/carl9170/fwdesc.h @@ -81,6 +81,12 @@ enum carl9170fw_feature_list { /* Firmware will pass BA when BARs are queued */ CARL9170FW_RX_BA_FILTER, + /* Firmware has support to write a byte at a time */ + CARL9170FW_HAS_WREGB_CMD, + + /* Pattern generator */ + CARL9170FW_PATTERN_GENERATOR, + /* KEEP LAST */ __CARL9170FW_FEATURE_NUM }; diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h index 0db874abde50..08e0ae9c5836 100644 --- a/drivers/net/wireless/ath/carl9170/hw.h +++ b/drivers/net/wireless/ath/carl9170/hw.h @@ -453,9 +453,74 @@ #define AR9170_MC_REG_BASE 0x1d1000 #define AR9170_MC_REG_FLASH_WAIT_STATE (AR9170_MC_REG_BASE + 0x000) -#define AR9170_MC_REG_SEEPROM_WP0 
(AR9170_MC_REG_BASE + 0x400) -#define AR9170_MC_REG_SEEPROM_WP1 (AR9170_MC_REG_BASE + 0x404) -#define AR9170_MC_REG_SEEPROM_WP2 (AR9170_MC_REG_BASE + 0x408) + +#define AR9170_SPI_REG_BASE (AR9170_MC_REG_BASE + 0x200) +#define AR9170_SPI_REG_CONTROL0 (AR9170_SPI_REG_BASE + 0x000) +#define AR9170_SPI_CONTROL0_BUSY BIT(0) +#define AR9170_SPI_CONTROL0_CMD_GO BIT(1) +#define AR9170_SPI_CONTROL0_PAGE_WR BIT(2) +#define AR9170_SPI_CONTROL0_SEQ_RD BIT(3) +#define AR9170_SPI_CONTROL0_CMD_ABORT BIT(4) +#define AR9170_SPI_CONTROL0_CMD_LEN_S 8 +#define AR9170_SPI_CONTROL0_CMD_LEN 0x00000f00 +#define AR9170_SPI_CONTROL0_RD_LEN_S 12 +#define AR9170_SPI_CONTROL0_RD_LEN 0x00007000 + +#define AR9170_SPI_REG_CONTROL1 (AR9170_SPI_REG_BASE + 0x004) +#define AR9170_SPI_CONTROL1_SCK_RATE BIT(0) +#define AR9170_SPI_CONTROL1_DRIVE_SDO BIT(1) +#define AR9170_SPI_CONTROL1_MODE_SEL_S 2 +#define AR9170_SPI_CONTROL1_MODE_SEL 0x000000c0 +#define AR9170_SPI_CONTROL1_WRITE_PROTECT BIT(4) + +#define AR9170_SPI_REG_COMMAND_PORT0 (AR9170_SPI_REG_BASE + 0x008) +#define AR9170_SPI_COMMAND_PORT0_CMD0_S 0 +#define AR9170_SPI_COMMAND_PORT0_CMD0 0x000000ff +#define AR9170_SPI_COMMAND_PORT0_CMD1_S 8 +#define AR9170_SPI_COMMAND_PORT0_CMD1 0x0000ff00 +#define AR9170_SPI_COMMAND_PORT0_CMD2_S 16 +#define AR9170_SPI_COMMAND_PORT0_CMD2 0x00ff0000 +#define AR9170_SPI_COMMAND_PORT0_CMD3_S 24 +#define AR9170_SPI_COMMAND_PORT0_CMD3 0xff000000 + +#define AR9170_SPI_REG_COMMAND_PORT1 (AR9170_SPI_REG_BASE + 0x00C) +#define AR9170_SPI_COMMAND_PORT1_CMD4_S 0 +#define AR9170_SPI_COMMAND_PORT1_CMD4 0x000000ff +#define AR9170_SPI_COMMAND_PORT1_CMD5_S 8 +#define AR9170_SPI_COMMAND_PORT1_CMD5 0x0000ff00 +#define AR9170_SPI_COMMAND_PORT1_CMD6_S 16 +#define AR9170_SPI_COMMAND_PORT1_CMD6 0x00ff0000 +#define AR9170_SPI_COMMAND_PORT1_CMD7_S 24 +#define AR9170_SPI_COMMAND_PORT1_CMD7 0xff000000 + +#define AR9170_SPI_REG_DATA_PORT (AR9170_SPI_REG_BASE + 0x010) +#define AR9170_SPI_REG_PAGE_WRITE_LEN (AR9170_SPI_REG_BASE + 0x014) + +#define AR9170_EEPROM_REG_BASE (AR9170_MC_REG_BASE + 0x400) +#define AR9170_EEPROM_REG_WP_MAGIC1 (AR9170_EEPROM_REG_BASE + 0x000) +#define AR9170_EEPROM_WP_MAGIC1 0x12345678 + +#define AR9170_EEPROM_REG_WP_MAGIC2 (AR9170_EEPROM_REG_BASE + 0x004) +#define AR9170_EEPROM_WP_MAGIC2 0x55aa00ff + +#define AR9170_EEPROM_REG_WP_MAGIC3 (AR9170_EEPROM_REG_BASE + 0x008) +#define AR9170_EEPROM_WP_MAGIC3 0x13579ace + +#define AR9170_EEPROM_REG_CLOCK_DIV (AR9170_EEPROM_REG_BASE + 0x00C) +#define AR9170_EEPROM_CLOCK_DIV_FAC_S 0 +#define AR9170_EEPROM_CLOCK_DIV_FAC 0x000001ff +#define AR9170_EEPROM_CLOCK_DIV_FAC_39KHZ 0xff +#define AR9170_EEPROM_CLOCK_DIV_FAC_78KHZ 0x7f +#define AR9170_EEPROM_CLOCK_DIV_FAC_312KHZ 0x1f +#define AR9170_EEPROM_CLOCK_DIV_FAC_10MHZ 0x0 +#define AR9170_EEPROM_CLOCK_DIV_SOFT_RST BIT(9) + +#define AR9170_EEPROM_REG_MODE (AR9170_EEPROM_REG_BASE + 0x010) +#define AR9170_EEPROM_MODE_EEPROM_SIZE_16K_PLUS BIT(31) + +#define AR9170_EEPROM_REG_WRITE_PROTECT (AR9170_EEPROM_REG_BASE + 0x014) +#define AR9170_EEPROM_WRITE_PROTECT_WP_STATUS BIT(0) +#define AR9170_EEPROM_WRITE_PROTECT_WP_SET BIT(8) /* Interrupt Controller */ #define AR9170_MAX_INT_SRC 9 @@ -589,11 +654,13 @@ #define AR9170_USB_REG_EP10_MAP (AR9170_USB_REG_BASE + 0x039) #define AR9170_USB_REG_EP_IN_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x03f) +#define AR9170_USB_EP_IN_STALL 0x8 #define AR9170_USB_EP_IN_TOGGLE 0x10 #define AR9170_USB_REG_EP_IN_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x03e) #define AR9170_USB_REG_EP_OUT_MAX_SIZE_HIGH (AR9170_USB_REG_BASE + 0x05f) +#define 
AR9170_USB_EP_OUT_STALL 0x8 #define AR9170_USB_EP_OUT_TOGGLE 0x10 #define AR9170_USB_REG_EP_OUT_MAX_SIZE_LOW (AR9170_USB_REG_BASE + 0x05e) diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h index 2282847d4bb8..a0410fe8c03a 100644 --- a/drivers/net/wireless/ath/carl9170/version.h +++ b/drivers/net/wireless/ath/carl9170/version.h @@ -1,7 +1,7 @@ #ifndef __CARL9170_SHARED_VERSION_H #define __CARL9170_SHARED_VERSION_H -#define CARL9170FW_VERSION_YEAR 12 -#define CARL9170FW_VERSION_MONTH 12 +#define CARL9170FW_VERSION_YEAR 16 +#define CARL9170FW_VERSION_MONTH 2 #define CARL9170FW_VERSION_DAY 15 -#define CARL9170FW_VERSION_GIT "1.9.7" +#define CARL9170FW_VERSION_GIT "1.9.9" #endif /* __CARL9170_SHARED_VERSION_H */ -- cgit v1.2.3 From 49f86ec21c01b654f6ec47f2f4567f4f9ebaa26b Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Mon, 15 Feb 2016 16:12:07 -0600 Subject: rtlwifi: Change long delays to sleeps Routine rtl_addr_delay() uses delay statements in code that can sleep. To improve system responsiveness, the various delay statements are changed. In addition, routines rtl_rfreg_delay() and rtl_bb_delay() are rewritten to use the code in rtl_addr_delay() for most of their input values. Suggested-by: Byeoungwook Kim Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/core.c | 44 ++++++++--------------------- 1 file changed, 12 insertions(+), 32 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 16bb57ccf90e..0f48048b8654 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -54,59 +54,39 @@ EXPORT_SYMBOL(channel5g_80m); void rtl_addr_delay(u32 addr) { if (addr == 0xfe) - mdelay(50); + msleep(50); else if (addr == 0xfd) - mdelay(5); + msleep(5); else if (addr == 0xfc) - mdelay(1); + msleep(1); else if (addr == 0xfb) - udelay(50); + usleep_range(50, 100); else if (addr == 0xfa) - udelay(5); + usleep_range(5, 10); else if (addr == 0xf9) - udelay(1); + usleep_range(1, 2); } EXPORT_SYMBOL(rtl_addr_delay); void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, u32 mask, u32 data) { - if (addr == 0xfe) { - mdelay(50); - } else if (addr == 0xfd) { - mdelay(5); - } else if (addr == 0xfc) { - mdelay(1); - } else if (addr == 0xfb) { - udelay(50); - } else if (addr == 0xfa) { - udelay(5); - } else if (addr == 0xf9) { - udelay(1); + if (addr >= 0xf9 && addr <= 0xfe) { + rtl_addr_delay(addr); } else { rtl_set_rfreg(hw, rfpath, addr, mask, data); - udelay(1); + usleep_range(1, 2); } } EXPORT_SYMBOL(rtl_rfreg_delay); void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data) { - if (addr == 0xfe) { - mdelay(50); - } else if (addr == 0xfd) { - mdelay(5); - } else if (addr == 0xfc) { - mdelay(1); - } else if (addr == 0xfb) { - udelay(50); - } else if (addr == 0xfa) { - udelay(5); - } else if (addr == 0xf9) { - udelay(1); + if (addr >= 0xf9 && addr <= 0xfe) { + rtl_addr_delay(addr); } else { rtl_set_bbreg(hw, addr, MASKDWORD, data); - udelay(1); + usleep_range(1, 2); } } EXPORT_SYMBOL(rtl_bb_delay); -- cgit v1.2.3 From a9eb0c4b73e7287074dfabd84337ea1a49976a3d Mon Sep 17 00:00:00 2001 From: Arend van Spriel Date: Wed, 17 Feb 2016 11:26:50 +0100 Subject: brcmfmac: change function name for brcmf_cfg80211_wait_vif_event_timeout() Dropping the '_timeout' from the function name as the fact that a timeout value is passed makes it obvious a timeout is used. 
Also helps to keep code lines a bit shorter and easier to stick to 80 char boundary. Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 9 +++++---- .../net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 4 ++-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 16 ++++++++-------- 3 files changed, 15 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d00c5c1d58bf..5a4a0c49f933 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -564,8 +564,8 @@ struct wireless_dev *brcmf_ap_add_vif(struct wiphy *wiphy, const char *name, } /* wait for firmware event */ - err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD, - BRCMF_VIF_EVENT_TIMEOUT); + err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_ADD, + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { brcmf_err("timeout occurred\n"); @@ -6395,8 +6395,9 @@ bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg) return armed; } -int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg, - u8 action, ulong timeout) + +int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg, + u8 action, ulong timeout) { struct brcmf_cfg80211_vif_event *event = &cfg->vif_event; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 40efb539ac26..97c327d6d3bb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -402,8 +402,8 @@ bool brcmf_get_vif_state_any(struct brcmf_cfg80211_info *cfg, void brcmf_cfg80211_arm_vif_event(struct brcmf_cfg80211_info *cfg, struct brcmf_cfg80211_vif *vif); bool brcmf_cfg80211_vif_event_armed(struct brcmf_cfg80211_info *cfg); -int brcmf_cfg80211_wait_vif_event_timeout(struct brcmf_cfg80211_info *cfg, - u8 action, ulong timeout); +int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg, + u8 action, ulong timeout); s32 brcmf_notify_escan_complete(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, bool aborted, bool fw_abort); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index 821b6494f9d1..de5892a67c22 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1988,8 +1988,8 @@ int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg, brcmf_cfg80211_arm_vif_event(cfg, NULL); return err; } - err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_CHANGE, - BRCMF_VIF_EVENT_TIMEOUT); + err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_CHANGE, + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { brcmf_err("No BRCMF_E_IF_CHANGE event received\n"); @@ -2090,8 +2090,8 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p, } /* wait for firmware event */ - err = brcmf_cfg80211_wait_vif_event_timeout(p2p->cfg, BRCMF_E_IF_ADD, - BRCMF_VIF_EVENT_TIMEOUT); + err = brcmf_cfg80211_wait_vif_event(p2p->cfg, BRCMF_E_IF_ADD, + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(p2p->cfg, NULL); 
brcmf_fweh_p2pdev_setup(pri_ifp, false); if (!err) { @@ -2180,8 +2180,8 @@ struct wireless_dev *brcmf_p2p_add_vif(struct wiphy *wiphy, const char *name, } /* wait for firmware event */ - err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_ADD, - BRCMF_VIF_EVENT_TIMEOUT); + err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_ADD, + BRCMF_VIF_EVENT_TIMEOUT); brcmf_cfg80211_arm_vif_event(cfg, NULL); if (!err) { brcmf_err("timeout occurred\n"); @@ -2272,8 +2272,8 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev) } if (!err) { /* wait for firmware event */ - err = brcmf_cfg80211_wait_vif_event_timeout(cfg, BRCMF_E_IF_DEL, - BRCMF_VIF_EVENT_TIMEOUT); + err = brcmf_cfg80211_wait_vif_event(cfg, BRCMF_E_IF_DEL, + BRCMF_VIF_EVENT_TIMEOUT); if (!err) err = -EIO; else -- cgit v1.2.3 From d536733442d14664437dc9c3ec993d97b5a4f1b1 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:26:51 +0100 Subject: brcmfmac: Limit memory allocs to <64K Some systems have problems with allocating memory allocation larger then 64K. Often on unload/load or suspend/resume a failure is reported: Could not allocate wiphy device. This patch makes the escan intermediate storage buf dynamically allocated, and smaller than 64K. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 18 +++++++++--------- .../wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 11 ++++++----- 2 files changed, 15 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5a4a0c49f933..5478128af02b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -1125,7 +1125,7 @@ brcmf_cfg80211_escan(struct wiphy *wiphy, struct brcmf_cfg80211_vif *vif, /* Arm scan timeout timer */ mod_timer(&cfg->escan_timeout, jiffies + - WL_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); + BRCMF_ESCAN_TIMER_INTERVAL_MS * HZ / 1000); return 0; @@ -3020,7 +3020,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, list = (struct brcmf_scan_results *) cfg->escan_info.escan_buf; - if (bi_length > WL_ESCAN_BUF_SIZE - list->buflen) { + if (bi_length > BRCMF_ESCAN_BUF_SIZE - list->buflen) { brcmf_err("Buffer is too small: ignoring\n"); goto exit; } @@ -3033,8 +3033,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, bss_info_le)) goto exit; } - memcpy(&(cfg->escan_info.escan_buf[list->buflen]), - bss_info_le, bi_length); + memcpy(&cfg->escan_info.escan_buf[list->buflen], bss_info_le, + bi_length); list->version = le32_to_cpu(bss_info_le->version); list->buflen += bi_length; list->count++; @@ -5402,14 +5402,14 @@ static void brcmf_deinit_priv_mem(struct brcmf_cfg80211_info *cfg) { kfree(cfg->conf); cfg->conf = NULL; - kfree(cfg->escan_ioctl_buf); - cfg->escan_ioctl_buf = NULL; kfree(cfg->extra_buf); cfg->extra_buf = NULL; kfree(cfg->wowl.nd); cfg->wowl.nd = NULL; kfree(cfg->wowl.nd_info); cfg->wowl.nd_info = NULL; + kfree(cfg->escan_info.escan_buf); + cfg->escan_info.escan_buf = NULL; } static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg) @@ -5417,9 +5417,6 @@ static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg) cfg->conf = kzalloc(sizeof(*cfg->conf), GFP_KERNEL); if (!cfg->conf) goto init_priv_mem_out; - 
cfg->escan_ioctl_buf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL); - if (!cfg->escan_ioctl_buf) - goto init_priv_mem_out; cfg->extra_buf = kzalloc(WL_EXTRA_BUF_MAX, GFP_KERNEL); if (!cfg->extra_buf) goto init_priv_mem_out; @@ -5431,6 +5428,9 @@ static s32 brcmf_init_priv_mem(struct brcmf_cfg80211_info *cfg) GFP_KERNEL); if (!cfg->wowl.nd_info) goto init_priv_mem_out; + cfg->escan_info.escan_buf = kzalloc(BRCMF_ESCAN_BUF_SIZE, GFP_KERNEL); + if (!cfg->escan_info.escan_buf) + goto init_priv_mem_out; return 0; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 97c327d6d3bb..01f096f7a089 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -28,8 +28,11 @@ #define WL_ROAM_TRIGGER_LEVEL -75 #define WL_ROAM_DELTA 20 -#define WL_ESCAN_BUF_SIZE (1024 * 64) -#define WL_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */ +/* Keep BRCMF_ESCAN_BUF_SIZE below 64K (65536). Allocing over 64K can be + * problematic on some systems and should be avoided. + */ +#define BRCMF_ESCAN_BUF_SIZE 65000 +#define BRCMF_ESCAN_TIMER_INTERVAL_MS 10000 /* E-Scan timeout */ #define WL_ESCAN_ACTION_START 1 #define WL_ESCAN_ACTION_CONTINUE 2 @@ -205,7 +208,7 @@ enum wl_escan_state { struct escan_info { u32 escan_state; - u8 escan_buf[WL_ESCAN_BUF_SIZE]; + u8 *escan_buf; struct wiphy *wiphy; struct brcmf_if *ifp; s32 (*run)(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp, @@ -278,7 +281,6 @@ struct brcmf_cfg80211_wowl { * @escan_info: escan information. * @escan_timeout: Timer for catch scan timeout. * @escan_timeout_work: scan timeout worker. - * @escan_ioctl_buf: dongle command buffer for escan commands. * @vif_list: linked list of vif instances. * @vif_cnt: number of vif instances. * @vif_event: vif event signalling. @@ -309,7 +311,6 @@ struct brcmf_cfg80211_info { struct escan_info escan_info; struct timer_list escan_timeout; struct work_struct escan_timeout_work; - u8 *escan_ioctl_buf; struct list_head vif_list; struct brcmf_cfg80211_vif_event vif_event; struct completion vif_disabled; -- cgit v1.2.3 From 6ea09153b6ccbee79869537ce61f5c16f736d72b Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 17 Feb 2016 11:26:52 +0100 Subject: brcmfmac: check for wowl support before enumerating feature flag In some cases wiphy->wowlan could be NULL if firmware doesn't have the support. Driver should check for support before walking down the feature flags. 
Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5478128af02b..d66f08fa408c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6594,7 +6594,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SCAN_RANDOM_MAC)) { wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR; #ifdef CONFIG_PM - if (wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT) + if (wiphy->wowlan && + wiphy->wowlan->flags & WIPHY_WOWLAN_NET_DETECT) wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; #endif } -- cgit v1.2.3 From 73345fd212980d2e28a5c6d83801c903bd773680 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:26:53 +0100 Subject: brcmfmac: Configure country code using device specific settings Country code configuration in a device is a device specific operation. For this the country code as specified by reg notifier (iso3166 alpha2) needs to be translated to a device specific country locale and revision number. This patch adds this translation and puts a placeholder in the device specific settings where the translation table can be stored. Additional patches will be needed to read these tables from for example device platform data. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 75 +++++++++++++++++++--- .../wireless/broadcom/brcm80211/brcmfmac/common.c | 4 +- .../wireless/broadcom/brcm80211/brcmfmac/common.h | 31 +++++++++ .../broadcom/brcm80211/brcmfmac/fwil_types.h | 4 +- 4 files changed, 101 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d66f08fa408c..2fd18d04b61e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6405,28 +6405,85 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg, vif_event_equals(event, action), timeout); } +static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2], + struct brcmf_fil_country_le *ccreq) +{ + struct cc_translate *country_codes; + struct cc_entry *cc; + s32 found_index; + int i; + + country_codes = drvr->settings->country_codes; + if (!country_codes) { + brcmf_dbg(TRACE, "No country codes configured for device\n"); + return -EINVAL; + } + + if ((alpha2[0] == ccreq->country_abbrev[0]) && + (alpha2[1] == ccreq->country_abbrev[1])) { + brcmf_dbg(TRACE, "Country code already set\n"); + return -EAGAIN; + } + + found_index = -1; + for (i = 0; i < country_codes->table_size; i++) { + cc = &country_codes->table[i]; + if ((cc->iso3166[0] == '\0') && (found_index == -1)) + found_index = i; + if ((cc->iso3166[0] == alpha2[0]) && + (cc->iso3166[1] == alpha2[1])) { + found_index = i; + break; + } + } + if (found_index == -1) { + brcmf_dbg(TRACE, 
"No country code match found\n"); + return -EINVAL; + } + memset(ccreq, 0, sizeof(*ccreq)); + ccreq->rev = cpu_to_le32(country_codes->table[found_index].rev); + memcpy(ccreq->ccode, country_codes->table[found_index].cc, + BRCMF_COUNTRY_BUF_SZ); + ccreq->country_abbrev[0] = alpha2[0]; + ccreq->country_abbrev[1] = alpha2[1]; + ccreq->country_abbrev[2] = 0; + + return 0; +} + static void brcmf_cfg80211_reg_notifier(struct wiphy *wiphy, struct regulatory_request *req) { struct brcmf_cfg80211_info *cfg = wiphy_priv(wiphy); struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg)); struct brcmf_fil_country_le ccreq; + s32 err; int i; - brcmf_dbg(TRACE, "enter: initiator=%d, alpha=%c%c\n", req->initiator, - req->alpha2[0], req->alpha2[1]); - /* ignore non-ISO3166 country codes */ for (i = 0; i < sizeof(req->alpha2); i++) if (req->alpha2[i] < 'A' || req->alpha2[i] > 'Z') { - brcmf_err("not a ISO3166 code\n"); + brcmf_err("not a ISO3166 code (0x%02x 0x%02x)\n", + req->alpha2[0], req->alpha2[1]); return; } - memset(&ccreq, 0, sizeof(ccreq)); - ccreq.rev = cpu_to_le32(-1); - memcpy(ccreq.ccode, req->alpha2, sizeof(req->alpha2)); - if (brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq))) { - brcmf_err("firmware rejected country setting\n"); + + brcmf_dbg(TRACE, "Enter: initiator=%d, alpha=%c%c\n", req->initiator, + req->alpha2[0], req->alpha2[1]); + + err = brcmf_fil_iovar_data_get(ifp, "country", &ccreq, sizeof(ccreq)); + if (err) { + brcmf_err("Country code iovar returned err = %d\n", err); + return; + } + + err = brcmf_translate_country_code(ifp->drvr, req->alpha2, &ccreq); + if (err) + return; + + err = brcmf_fil_iovar_data_set(ifp, "country", &ccreq, sizeof(ccreq)); + if (err) { + brcmf_err("Firmware rejected country setting\n"); return; } brcmf_setup_wiphybands(wiphy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index cfee477a6eb1..8199862c7cdb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -230,10 +230,8 @@ void brcmf_mp_attach(void) int brcmf_mp_device_attach(struct brcmf_pub *drvr) { drvr->settings = kzalloc(sizeof(*drvr->settings), GFP_ATOMIC); - if (!drvr->settings) { - brcmf_err("Failed to alloca storage space for settings\n"); + if (!drvr->settings) return -ENOMEM; - } drvr->settings->sdiod_txglomsz = brcmf_sdiod_txglomsz; drvr->settings->p2p_enable = !!brcmf_p2p_enable; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index 3b0a63b98e99..b678b2d60749 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -15,6 +15,8 @@ #ifndef BRCMFMAC_COMMON_H #define BRCMFMAC_COMMON_H +#include "fwil_types.h" + extern const u8 ALLFFMAC[ETH_ALEN]; #define BRCMF_FW_ALTPATH_LEN 256 @@ -38,6 +40,33 @@ struct brcmf_mp_global_t { extern struct brcmf_mp_global_t brcmf_mp_global; +/** + * struct cc_entry - Struct for translating user space country code (iso3166) to + * firmware country code and revision. + * + * @iso3166: iso3166 alpha 2 country code string. + * @cc: firmware country code string. + * @rev: firmware country code revision. 
+ */ +struct cc_entry { + char iso3166[BRCMF_COUNTRY_BUF_SZ]; + char cc[BRCMF_COUNTRY_BUF_SZ]; + s32 rev; +}; + +/** + * struct cc_translate - Struct for translating country codes as set by user + * space to a country code and rev which can be used by + * firmware. + * + * @table_size: number of entries in table (> 0) + * @table: dynamic array of 1 or more elements with translation information. + */ +struct cc_translate { + int table_size; + struct cc_entry table[0]; +}; + /** * struct brcmf_mp_device - Device module paramaters. * @@ -47,6 +76,7 @@ extern struct brcmf_mp_global_t brcmf_mp_global; * @feature_disable: Feature_disable bitmask. * @fcmode: FWS flow control. * @roamoff: Firmware roaming off? + * @country_codes: If available, pointer to struct for translating country codes */ struct brcmf_mp_device { int sdiod_txglomsz; @@ -56,6 +86,7 @@ struct brcmf_mp_device { int fcmode; bool roamoff; bool ignore_probe_fail; + struct cc_translate *country_codes; }; void brcmf_mp_attach(void); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 1afc2ad83b6c..e9e177dad1a6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -134,6 +134,8 @@ #define BRCMF_PFN_MAC_OUI_ONLY BIT(0) #define BRCMF_PFN_SET_MAC_UNASSOC BIT(1) +#define BRCMF_MCSSET_LEN 16 + /* join preference types for join_pref iovar */ enum brcmf_join_pref_types { BRCMF_JOIN_PREF_RSSI = 1, @@ -279,7 +281,7 @@ struct brcmf_bss_info_le { __le32 reserved32[1]; /* Reserved for expansion of BSS properties */ u8 flags; /* flags */ u8 reserved[3]; /* Reserved for expansion of BSS properties */ - u8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */ + u8 basic_mcs[BRCMF_MCSSET_LEN]; /* 802.11N BSS required MCS set */ __le16 ie_offset; /* offset at which IEs start, from beginning */ __le32 ie_length; /* byte length of Information Elements */ -- cgit v1.2.3 From 0aedbcaf6f182690790d98d90d5fe1e64c846c34 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:26:54 +0100 Subject: brcmfmac: Add length checks on firmware events Add additional length checks on firmware events to create more robust code. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Lei Zhang Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 10 ++++ .../wireless/broadcom/brcm80211/brcmfmac/fweh.c | 57 ++++-------------- .../wireless/broadcom/brcm80211/brcmfmac/fweh.h | 68 +++++++++++++++++----- .../net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 10 ++++ 4 files changed, 87 insertions(+), 58 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 2fd18d04b61e..6cc8fdc9fcc8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3092,6 +3092,11 @@ brcmf_notify_sched_scan_results(struct brcmf_if *ifp, brcmf_dbg(SCAN, "Enter\n"); + if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) { + brcmf_dbg(SCAN, "Event data to small. Ignore\n"); + return 0; + } + if (e->event_code == BRCMF_E_PFN_NET_LOST) { brcmf_dbg(SCAN, "PFN NET LOST event. 
Do Nothing\n"); return 0; @@ -3415,6 +3420,11 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e, brcmf_dbg(SCAN, "Enter\n"); + if (e->datalen < (sizeof(*pfn_result) + sizeof(*netinfo))) { + brcmf_dbg(SCAN, "Event data to small. Ignore\n"); + return 0; + } + pfn_result = (struct brcmf_pno_scanresults_le *)data; if (e->event_code == BRCMF_E_PFN_NET_LOST) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 7b26fb1b437c..d414fbbcc814 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -25,50 +25,6 @@ #include "fweh.h" #include "fwil.h" -/** - * struct brcm_ethhdr - broadcom specific ether header. - * - * @subtype: subtype for this packet. - * @length: TODO: length of appended data. - * @version: version indication. - * @oui: OUI of this packet. - * @usr_subtype: subtype for this OUI. - */ -struct brcm_ethhdr { - __be16 subtype; - __be16 length; - u8 version; - u8 oui[3]; - __be16 usr_subtype; -} __packed; - -struct brcmf_event_msg_be { - __be16 version; - __be16 flags; - __be32 event_type; - __be32 status; - __be32 reason; - __be32 auth_type; - __be32 datalen; - u8 addr[ETH_ALEN]; - char ifname[IFNAMSIZ]; - u8 ifidx; - u8 bsscfgidx; -} __packed; - -/** - * struct brcmf_event - contents of broadcom event packet. - * - * @eth: standard ether header. - * @hdr: broadcom specific ether header. - * @msg: common part of the actual event message. - */ -struct brcmf_event { - struct ethhdr eth; - struct brcm_ethhdr hdr; - struct brcmf_event_msg_be msg; -} __packed; - /** * struct brcmf_fweh_queue_item - event item on event queue. * @@ -85,6 +41,7 @@ struct brcmf_fweh_queue_item { u8 ifidx; u8 ifaddr[ETH_ALEN]; struct brcmf_event_msg_be emsg; + u32 datalen; u8 data[0]; }; @@ -294,6 +251,11 @@ static void brcmf_fweh_event_worker(struct work_struct *work) brcmf_dbg_hex_dump(BRCMF_EVENT_ON(), event->data, min_t(u32, emsg.datalen, 64), "event payload, len=%d\n", emsg.datalen); + if (emsg.datalen > event->datalen) { + brcmf_err("event invalid length header=%d, msg=%d\n", + event->datalen, emsg.datalen); + goto event_free; + } /* special handling of interface event */ if (event->code == BRCMF_E_IF) { @@ -439,7 +401,8 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp) * dispatch the event to a registered handler (using worker). 
*/ void brcmf_fweh_process_event(struct brcmf_pub *drvr, - struct brcmf_event *event_packet) + struct brcmf_event *event_packet, + u32 packet_len) { enum brcmf_fweh_event_code code; struct brcmf_fweh_info *fweh = &drvr->fweh; @@ -459,6 +422,9 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, if (code != BRCMF_E_IF && !fweh->evt_handler[code]) return; + if (datalen > BRCMF_DCMD_MAXLEN) + return; + if (in_interrupt()) alloc_flag = GFP_ATOMIC; @@ -472,6 +438,7 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, /* use memcpy to get aligned event message */ memcpy(&event->emsg, &event_packet->msg, sizeof(event->emsg)); memcpy(event->data, data, datalen); + event->datalen = datalen; memcpy(event->ifaddr, event_packet->eth.h_dest, ETH_ALEN); brcmf_fweh_queue_event(fweh, event); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h index 5e39e2a9e388..26ff5a9648f3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h @@ -27,7 +27,6 @@ struct brcmf_pub; struct brcmf_if; struct brcmf_cfg80211_info; -struct brcmf_event; /* list of firmware events */ #define BRCMF_FWEH_EVENT_ENUM_DEFLIST \ @@ -180,12 +179,54 @@ enum brcmf_fweh_event_code { /** * definitions for event packet validation. */ -#define BRCMF_EVENT_OUI_OFFSET 19 -#define BRCM_OUI "\x00\x10\x18" -#define DOT11_OUI_LEN 3 -#define BCMILCP_BCM_SUBTYPE_EVENT 1 +#define BRCM_OUI "\x00\x10\x18" +#define BCMILCP_BCM_SUBTYPE_EVENT 1 +/** + * struct brcm_ethhdr - broadcom specific ether header. + * + * @subtype: subtype for this packet. + * @length: TODO: length of appended data. + * @version: version indication. + * @oui: OUI of this packet. + * @usr_subtype: subtype for this OUI. + */ +struct brcm_ethhdr { + __be16 subtype; + __be16 length; + u8 version; + u8 oui[3]; + __be16 usr_subtype; +} __packed; + +struct brcmf_event_msg_be { + __be16 version; + __be16 flags; + __be32 event_type; + __be32 status; + __be32 reason; + __be32 auth_type; + __be32 datalen; + u8 addr[ETH_ALEN]; + char ifname[IFNAMSIZ]; + u8 ifidx; + u8 bsscfgidx; +} __packed; + +/** + * struct brcmf_event - contents of broadcom event packet. + * + * @eth: standard ether header. + * @hdr: broadcom specific ether header. + * @msg: common part of the actual event message. + */ +struct brcmf_event { + struct ethhdr eth; + struct brcm_ethhdr hdr; + struct brcmf_event_msg_be msg; +} __packed; + /** * struct brcmf_event_msg - firmware event message. 
* @@ -256,34 +297,35 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr, enum brcmf_fweh_event_code code); int brcmf_fweh_activate_events(struct brcmf_if *ifp); void brcmf_fweh_process_event(struct brcmf_pub *drvr, - struct brcmf_event *event_packet); + struct brcmf_event *event_packet, + u32 packet_len); void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing); static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr, struct sk_buff *skb) { struct brcmf_event *event_packet; - u8 *data; u16 usr_stype; /* only process events when protocol matches */ if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL)) return; + if ((skb->len + ETH_HLEN) < sizeof(*event_packet)) + return; + /* check for BRCM oui match */ event_packet = (struct brcmf_event *)skb_mac_header(skb); - data = (u8 *)event_packet; - data += BRCMF_EVENT_OUI_OFFSET; - if (memcmp(BRCM_OUI, data, DOT11_OUI_LEN)) + if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0], + sizeof(event_packet->hdr.oui))) return; /* final match on usr_subtype */ - data += DOT11_OUI_LEN; - usr_stype = get_unaligned_be16(data); + usr_stype = get_unaligned_be16(&event_packet->hdr.usr_subtype); if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT) return; - brcmf_fweh_process_event(drvr, event_packet); + brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN); } #endif /* FWEH_H_ */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c index de5892a67c22..b5a49e564f25 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c @@ -1361,6 +1361,11 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp, u16 mgmt_type; u8 action; + if (e->datalen < sizeof(*rxframe)) { + brcmf_dbg(SCAN, "Event data to small. Ignore\n"); + return 0; + } + ch.chspec = be16_to_cpu(rxframe->chanspec); cfg->d11inf.decchspec(&ch); /* Check if wpa_supplicant has registered for this frame */ @@ -1858,6 +1863,11 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp, brcmf_dbg(INFO, "Enter: event %d reason %d\n", e->event_code, e->reason); + if (e->datalen < sizeof(*rxframe)) { + brcmf_dbg(SCAN, "Event data to small. Ignore\n"); + return 0; + } + ch.chspec = be16_to_cpu(rxframe->chanspec); cfg->d11inf.decchspec(&ch); -- cgit v1.2.3 From 52f22fb21764f083d697b3e35c18f37e471eba76 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 17 Feb 2016 11:26:55 +0100 Subject: brcmfmac: add neighbor discovery offload ip address table configuration Configure ipv6 address for neighbor discovery offload ip table in firmware obtained through ipv6 address notification callback. 
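The table maintenance that the notifier callback performs (see the core.c hunks below) amounts to a bounded list of at most NDOL_MAX_ENTRIES addresses: a new address is appended on NETDEV_UP, the oldest entry is dropped when the table is full, and remaining entries are compacted on NETDEV_DOWN before a worker pushes the table to firmware through the "nd_hostip" iovar. A stand-alone sketch of just that policy follows; it is illustrative only, borrows the constant name from the patch, and simplifies the types and helpers.

/* Illustrative sketch of the bounded address-table policy only; the real
 * driver keeps struct in6_addr entries per interface and lets a worker
 * write them to firmware via the "nd_hostip" iovar.
 */
#include <string.h>
#include <netinet/in.h>

#define NDOL_MAX_ENTRIES 8

struct nd_tbl {
	struct in6_addr addr[NDOL_MAX_ENTRIES];
	unsigned int count;
};

static int nd_tbl_find(const struct nd_tbl *t, const struct in6_addr *a)
{
	unsigned int i;

	for (i = 0; i < t->count; i++)
		if (!memcmp(&t->addr[i], a, sizeof(*a)))
			return (int)i;
	return -1;
}

/* NETDEV_UP: append, evicting the oldest entry when the table is full. */
static void nd_tbl_add(struct nd_tbl *t, const struct in6_addr *a)
{
	if (nd_tbl_find(t, a) >= 0)
		return;
	if (t->count == NDOL_MAX_ENTRIES) {
		memmove(&t->addr[0], &t->addr[1],
			(NDOL_MAX_ENTRIES - 1) * sizeof(t->addr[0]));
		t->count--;
	}
	t->addr[t->count++] = *a;
}

/* NETDEV_DOWN: remove the entry and compact what follows it. */
static void nd_tbl_del(struct nd_tbl *t, const struct in6_addr *a)
{
	int i = nd_tbl_find(t, a);

	if (i < 0)
		return;
	memmove(&t->addr[i], &t->addr[i + 1],
		(t->count - (unsigned int)i - 1) * sizeof(t->addr[0]));
	t->count--;
}
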
Reviewed-by: Hante Meuleman Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 23 ++-- .../wireless/broadcom/brcm80211/brcmfmac/core.c | 138 ++++++++++++++++++--- .../wireless/broadcom/brcm80211/brcmfmac/core.h | 7 ++ 3 files changed, 141 insertions(+), 27 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 6cc8fdc9fcc8..71f1fdf2e7e8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -456,7 +456,7 @@ send_key_to_dongle(struct brcmf_if *ifp, struct brcmf_wsec_key *key) } static s32 -brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) +brcmf_configure_arp_nd_offload(struct brcmf_if *ifp, bool enable) { s32 err; u32 mode; @@ -484,6 +484,15 @@ brcmf_configure_arp_offload(struct brcmf_if *ifp, bool enable) enable, mode); } + err = brcmf_fil_iovar_int_set(ifp, "ndoe", enable); + if (err) { + brcmf_dbg(TRACE, "failed to configure (%d) ND offload err = %d\n", + enable, err); + err = 0; + } else + brcmf_dbg(TRACE, "successfully configured (%d) ND offload to 0x%x\n", + enable, mode); + return err; } @@ -3543,7 +3552,7 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) brcmf_report_wowl_wakeind(wiphy, ifp); brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0); brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0); - brcmf_configure_arp_offload(ifp, true); + brcmf_configure_arp_nd_offload(ifp, true); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, cfg->wowl.pre_pmmode); cfg->wowl.active = false; @@ -3567,7 +3576,7 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, brcmf_dbg(TRACE, "Suspend, wowl config.\n"); - brcmf_configure_arp_offload(ifp, false); + brcmf_configure_arp_nd_offload(ifp, false); brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->wowl.pre_pmmode); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX); @@ -4336,7 +4345,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, if (!mbss) { brcmf_set_mpc(ifp, 0); - brcmf_configure_arp_offload(ifp, false); + brcmf_configure_arp_nd_offload(ifp, false); } /* find the RSN_IE */ @@ -4482,7 +4491,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, exit: if ((err) && (!mbss)) { brcmf_set_mpc(ifp, 1); - brcmf_configure_arp_offload(ifp, true); + brcmf_configure_arp_nd_offload(ifp, true); } return err; } @@ -4540,7 +4549,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev) brcmf_err("bss_enable config failed %d\n", err); } brcmf_set_mpc(ifp, 1); - brcmf_configure_arp_offload(ifp, true); + brcmf_configure_arp_nd_offload(ifp, true); clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state); brcmf_net_setcarrier(ifp, false); @@ -6287,7 +6296,7 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) if (err) goto default_conf_out; - brcmf_configure_arp_offload(ifp, true); + brcmf_configure_arp_nd_offload(ifp, true); cfg->dongle_up = true; default_conf_out: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index ed9998b69709..9507cc933c3b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -20,6 +20,8 @@ #include #include #include +#include +#include #include 
#include @@ -172,6 +174,35 @@ _brcmf_set_mac_address(struct work_struct *work) } } +#if IS_ENABLED(CONFIG_IPV6) +static void _brcmf_update_ndtable(struct work_struct *work) +{ + struct brcmf_if *ifp; + int i, ret; + + ifp = container_of(work, struct brcmf_if, ndoffload_work); + + /* clear the table in firmware */ + ret = brcmf_fil_iovar_data_set(ifp, "nd_hostip_clear", NULL, 0); + if (ret) { + brcmf_dbg(TRACE, "fail to clear nd ip table err:%d\n", ret); + return; + } + + for (i = 0; i < ifp->ipv6addr_idx; i++) { + ret = brcmf_fil_iovar_data_set(ifp, "nd_hostip", + &ifp->ipv6_addr_tbl[i], + sizeof(struct in6_addr)); + if (ret) + brcmf_err("add nd ip err %d\n", ret); + } +} +#else +static void _brcmf_update_ndtable(struct work_struct *work) +{ +} +#endif + static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr) { struct brcmf_if *ifp = netdev_priv(ndev); @@ -685,6 +716,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked) INIT_WORK(&ifp->setmacaddr_work, _brcmf_set_mac_address); INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list); + INIT_WORK(&ifp->ndoffload_work, _brcmf_update_ndtable); if (rtnl_locked) err = register_netdevice(ndev); @@ -884,6 +916,7 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx) if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) { cancel_work_sync(&ifp->setmacaddr_work); cancel_work_sync(&ifp->multicast_work); + cancel_work_sync(&ifp->ndoffload_work); } brcmf_net_detach(ifp->ndev); } else { @@ -1025,6 +1058,56 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb, } #endif +#if IS_ENABLED(CONFIG_IPV6) +static int brcmf_inet6addr_changed(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct brcmf_pub *drvr = container_of(nb, struct brcmf_pub, + inet6addr_notifier); + struct inet6_ifaddr *ifa = data; + struct brcmf_if *ifp; + int i; + struct in6_addr *table; + + /* Only handle primary interface */ + ifp = drvr->iflist[0]; + if (!ifp) + return NOTIFY_DONE; + if (ifp->ndev != ifa->idev->dev) + return NOTIFY_DONE; + + table = ifp->ipv6_addr_tbl; + for (i = 0; i < NDOL_MAX_ENTRIES; i++) + if (ipv6_addr_equal(&ifa->addr, &table[i])) + break; + + switch (action) { + case NETDEV_UP: + if (i == NDOL_MAX_ENTRIES) { + if (ifp->ipv6addr_idx < NDOL_MAX_ENTRIES) { + table[ifp->ipv6addr_idx++] = ifa->addr; + } else { + for (i = 0; i < NDOL_MAX_ENTRIES - 1; i++) + table[i] = table[i + 1]; + table[NDOL_MAX_ENTRIES - 1] = ifa->addr; + } + } + break; + case NETDEV_DOWN: + if (i < NDOL_MAX_ENTRIES) + for (; i < ifp->ipv6addr_idx; i++) + table[i] = table[i + 1]; + break; + default: + break; + } + + schedule_work(&ifp->ndoffload_work); + + return NOTIFY_OK; +} +#endif + int brcmf_attach(struct device *dev) { struct brcmf_pub *drvr = NULL; @@ -1164,30 +1247,41 @@ int brcmf_bus_start(struct device *dev) #ifdef CONFIG_INET drvr->inetaddr_notifier.notifier_call = brcmf_inetaddr_changed; ret = register_inetaddr_notifier(&drvr->inetaddr_notifier); + if (ret) + goto fail; + +#if IS_ENABLED(CONFIG_IPV6) + drvr->inet6addr_notifier.notifier_call = brcmf_inet6addr_changed; + ret = register_inet6addr_notifier(&drvr->inet6addr_notifier); + if (ret) { + unregister_inetaddr_notifier(&drvr->inetaddr_notifier); + goto fail; + } #endif +#endif /* CONFIG_INET */ + + return 0; fail: - if (ret < 0) { - brcmf_err("failed: %d\n", ret); - if (drvr->config) { - brcmf_cfg80211_detach(drvr->config); - drvr->config = NULL; - } - if (drvr->fws) { - brcmf_fws_del_interface(ifp); - brcmf_fws_deinit(drvr); - } - if (ifp) - 
brcmf_net_detach(ifp->ndev); - if (p2p_ifp) - brcmf_net_detach(p2p_ifp->ndev); - drvr->iflist[0] = NULL; - drvr->iflist[1] = NULL; - if (brcmf_ignoring_probe_fail(drvr)) - ret = 0; - return ret; + brcmf_err("failed: %d\n", ret); + if (drvr->config) { + brcmf_cfg80211_detach(drvr->config); + drvr->config = NULL; } - return 0; + if (drvr->fws) { + brcmf_fws_del_interface(ifp); + brcmf_fws_deinit(drvr); + } + if (ifp) + brcmf_net_detach(ifp->ndev); + if (p2p_ifp) + brcmf_net_detach(p2p_ifp->ndev); + drvr->iflist[0] = NULL; + drvr->iflist[1] = NULL; + if (brcmf_ignoring_probe_fail(drvr)) + ret = 0; + + return ret; } void brcmf_bus_add_txhdrlen(struct device *dev, uint len) @@ -1237,6 +1331,10 @@ void brcmf_detach(struct device *dev) unregister_inetaddr_notifier(&drvr->inetaddr_notifier); #endif +#if IS_ENABLED(CONFIG_IPV6) + unregister_inet6addr_notifier(&drvr->inet6addr_notifier); +#endif + /* stop firmware event handling */ brcmf_fweh_detach(drvr); if (drvr->config) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 8f39435f976f..500dac6e1f82 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -48,6 +48,8 @@ */ #define BRCMF_DRIVER_FIRMWARE_VERSION_LEN 32 +#define NDOL_MAX_ENTRIES 8 + /** * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info * @@ -143,6 +145,7 @@ struct brcmf_pub { #endif struct notifier_block inetaddr_notifier; + struct notifier_block inet6addr_notifier; struct brcmf_mp_device *settings; }; @@ -175,6 +178,7 @@ enum brcmf_netif_stop_reason { * @stats: interface specific network statistics. * @setmacaddr_work: worker object for setting mac address. * @multicast_work: worker object for multicast provisioning. + * @ndoffload_work: worker object for neighbor discovery offload configuration. * @fws_desc: interface specific firmware-signalling descriptor. * @ifidx: interface index in device firmware. * @bsscfgidx: index of bss associated with this interface. @@ -191,6 +195,7 @@ struct brcmf_if { struct net_device_stats stats; struct work_struct setmacaddr_work; struct work_struct multicast_work; + struct work_struct ndoffload_work; struct brcmf_fws_mac_descriptor *fws_desc; int ifidx; s32 bsscfgidx; @@ -199,6 +204,8 @@ struct brcmf_if { spinlock_t netif_stop_lock; atomic_t pend_8021x_cnt; wait_queue_head_t pend_8021x_wait; + struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; + u8 ipv6addr_idx; }; struct brcmf_skb_reorder_data { -- cgit v1.2.3 From cd2bc19c61b2da81ce310edb20f98e51fa7a5889 Mon Sep 17 00:00:00 2001 From: Franky Lin Date: Wed, 17 Feb 2016 11:26:56 +0100 Subject: brcmfmac: check return for ARP ip setting iovar The return value of iovar set function should be saved and checked. 
Reviewed-by: Pieter-Paul Giesberts Reviewed-by: Hante Meuleman Signed-off-by: Franky Lin Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 9507cc933c3b..dcf9393ad8ff 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1039,14 +1039,14 @@ static int brcmf_inetaddr_changed(struct notifier_block *nb, return NOTIFY_OK; } for (i = 0; i < ARPOL_MAX_ENTRIES; i++) { - if (addr_table[i] != 0) { - brcmf_fil_iovar_data_set(ifp, - "arp_hostip", &addr_table[i], - sizeof(addr_table[i])); - if (ret) - brcmf_err("add arp ip err %d\n", - ret); - } + if (addr_table[i] == 0) + continue; + ret = brcmf_fil_iovar_data_set(ifp, "arp_hostip", + &addr_table[i], + sizeof(addr_table[i])); + if (ret) + brcmf_err("add arp ip err %d\n", + ret); } } break; -- cgit v1.2.3 From 6ac27689b01e23e21f08d6f55d23a94eb10a8efc Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:26:57 +0100 Subject: brcmfmac: use device memsize config from fw if defined Newer type pcie devices have memory which get shared between fw and hw. The division of this memory is done firmware compile time. As a result the ramsize as used by driver needs to be adjusted for this. This is done by reading the memory size from the firmware. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/pcie.c | 33 ++++++++++++++++++++++ 1 file changed, 33 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index d5f9ef470447..d89212b4649e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -207,6 +207,10 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_CFGREG_REG_BAR3_CONFIG 0x4F4 #define BRCMF_PCIE_LINK_STATUS_CTRL_ASPM_ENAB 3 +/* Magic number at a magic location to find RAM size */ +#define BRCMF_RAMSIZE_MAGIC 0x534d4152 /* SMAR */ +#define BRCMF_RAMSIZE_OFFSET 0x6c + struct brcmf_pcie_console { u32 base_addr; @@ -1412,6 +1416,28 @@ static const struct brcmf_bus_ops brcmf_pcie_bus_ops = { }; +static void +brcmf_pcie_adjust_ramsize(struct brcmf_pciedev_info *devinfo, u8 *data, + u32 data_len) +{ + __le32 *field; + u32 newsize; + + if (data_len < BRCMF_RAMSIZE_OFFSET + 8) + return; + + field = (__le32 *)&data[BRCMF_RAMSIZE_OFFSET]; + if (le32_to_cpup(field) != BRCMF_RAMSIZE_MAGIC) + return; + field++; + newsize = le32_to_cpup(field); + + brcmf_dbg(PCIE, "Found ramsize info in FW, adjusting to 0x%x\n", + newsize); + devinfo->ci->ramsize = newsize; +} + + static int brcmf_pcie_init_share_ram_info(struct brcmf_pciedev_info *devinfo, u32 sharedram_addr) @@ -1694,6 +1720,13 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, brcmf_pcie_attach(devinfo); + /* Some of the firmwares have the size of the memory of the device + * defined inside the firmware. This is because part of the memory in + * the device is shared and the devision is determined by FW. 
Parse + * the firmware and adjust the chip memory size now. + */ + brcmf_pcie_adjust_ramsize(devinfo, (u8 *)fw->data, fw->size); + ret = brcmf_pcie_download_fw_nvram(devinfo, fw, nvram, nvram_len); if (ret) goto fail; -- cgit v1.2.3 From 9300bf8610fd90a63a04dcc6be8223552321edad Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:26:58 +0100 Subject: brcmfmac: use bar1 window size as provided by pci subsystem The PCIE bar1 window size is specified by chip. Currently the ioremap of bar1 was using a define which always matched the size of bar1, but newer chips can have a different bar1 sizes. With this patch the ioremap will be called with the by chip provided window size. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index d89212b4649e..3fd4e3b1a211 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -72,7 +72,6 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_FW_UP_TIMEOUT 2000 /* msec */ -#define BRCMF_PCIE_TCM_MAP_SIZE (4096 * 1024) #define BRCMF_PCIE_REG_MAP_SIZE (32 * 1024) /* backplane addres space accessed by BAR0 */ @@ -252,7 +251,6 @@ struct brcmf_pciedev_info { char nvram_name[BRCMF_FW_NAME_LEN]; void __iomem *regs; void __iomem *tcm; - u32 tcm_size; u32 ram_base; u32 ram_size; struct brcmf_chip *ci; @@ -1592,8 +1590,7 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) } devinfo->regs = ioremap_nocache(bar0_addr, BRCMF_PCIE_REG_MAP_SIZE); - devinfo->tcm = ioremap_nocache(bar1_addr, BRCMF_PCIE_TCM_MAP_SIZE); - devinfo->tcm_size = BRCMF_PCIE_TCM_MAP_SIZE; + devinfo->tcm = ioremap_nocache(bar1_addr, bar1_size); if (!devinfo->regs || !devinfo->tcm) { brcmf_err("ioremap() failed (%p,%p)\n", devinfo->regs, @@ -1602,8 +1599,9 @@ static int brcmf_pcie_get_resource(struct brcmf_pciedev_info *devinfo) } brcmf_dbg(PCIE, "Phys addr : reg space = %p base addr %#016llx\n", devinfo->regs, (unsigned long long)bar0_addr); - brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx\n", - devinfo->tcm, (unsigned long long)bar1_addr); + brcmf_dbg(PCIE, "Phys addr : mem space = %p base addr %#016llx size 0x%x\n", + devinfo->tcm, (unsigned long long)bar1_addr, + (unsigned int)bar1_size); return 0; } -- cgit v1.2.3 From bc86fdb9ac02c77b9f55325f64fb70decc425962 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:26:59 +0100 Subject: brcmfmac: add support for the PCIE 4366c0 chip A newer version of the 4366 PCIE chip has been released. Add support for this version of the chip. 
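The two 4366 entries in the firmware table below differ only in their revision mask. Assuming the usual brcmfmac interpretation of that mask (bit N set means chip revision N is covered; the matching helper itself is not part of this patch), 0x0000000F keeps revisions 0-3 on the 4366b image while 0xFFFFFFF0 routes revisions 4 and up to the new brcmfmac4366c-pcie.bin. A minimal sketch of that interpretation, with table values copied from the hunk and everything else assumed:

/* Illustrative sketch only -- the chip id value and the BIT(chiprev)
 * matching rule are assumptions, not code introduced by this patch.
 */
#include <stdint.h>
#include <stddef.h>

struct fw_map_entry {
	uint32_t chipid;
	uint32_t revmask;	/* bit N set => chip revision N supported */
	const char *fw_name;
};

static const struct fw_map_entry fw_map[] = {
	{ 0x4366, 0x0000000F, "brcmfmac4366b-pcie.bin" },
	{ 0x4366, 0xFFFFFFF0, "brcmfmac4366c-pcie.bin" },
};

static const char *pick_fw(uint32_t chipid, uint32_t chiprev)
{
	size_t i;

	if (chiprev > 31)
		return NULL;
	for (i = 0; i < sizeof(fw_map) / sizeof(fw_map[0]); i++)
		if (fw_map[i].chipid == chipid &&
		    (fw_map[i].revmask & (1u << chiprev)))
			return fw_map[i].fw_name;
	return NULL;	/* no matching image */
}

Under these assumptions a 4366 at revision 3 keeps using the 4366b image, while revision 4 (the 4366c0) picks up the new firmware file.
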
Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 3fd4e3b1a211..3cb32b73fa23 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -53,6 +53,7 @@ BRCMF_FW_NVRAM_DEF(4358, "brcmfmac4358-pcie.bin", "brcmfmac4358-pcie.txt"); BRCMF_FW_NVRAM_DEF(4359, "brcmfmac4359-pcie.bin", "brcmfmac4359-pcie.txt"); BRCMF_FW_NVRAM_DEF(4365B, "brcmfmac4365b-pcie.bin", "brcmfmac4365b-pcie.txt"); BRCMF_FW_NVRAM_DEF(4366B, "brcmfmac4366b-pcie.bin", "brcmfmac4366b-pcie.txt"); +BRCMF_FW_NVRAM_DEF(4366C, "brcmfmac4366c-pcie.bin", "brcmfmac4366c-pcie.txt"); BRCMF_FW_NVRAM_DEF(4371, "brcmfmac4371-pcie.bin", "brcmfmac4371-pcie.txt"); static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { @@ -66,7 +67,8 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4358_CHIP_ID, 0xFFFFFFFF, 4358), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4359_CHIP_ID, 0xFFFFFFFF, 4359), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4365_CHIP_ID, 0xFFFFFFFF, 4365B), - BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFFF, 4366B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0x0000000F, 4366B), + BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4366_CHIP_ID, 0xFFFFFFF0, 4366C), BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4371_CHIP_ID, 0xFFFFFFFF, 4371), }; -- cgit v1.2.3 From d457a44fd85cd766c8bb48d6b542467bf89601b4 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:00 +0100 Subject: brcmfmac: remove pcie gen1 support The PCIE bus driver supports older gen1 (v1) chips, but there is no actual device which is using this older pcie core which is supported by brcmfmac. Remove all gen1 related code. 
Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/pcie.c | 135 +++------------------ 1 file changed, 20 insertions(+), 115 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 3cb32b73fa23..1866b66b47f2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -100,9 +100,6 @@ static struct brcmf_firmware_mapping brcmf_pcie_fwnames[] = { #define BRCMF_PCIE_PCIE2REG_CONFIGDATA 0x124 #define BRCMF_PCIE_PCIE2REG_H2D_MAILBOX 0x140 -#define BRCMF_PCIE_GENREV1 1 -#define BRCMF_PCIE_GENREV2 2 - #define BRCMF_PCIE2_INTA 0x01 #define BRCMF_PCIE2_INTB 0x02 @@ -257,9 +254,7 @@ struct brcmf_pciedev_info { u32 ram_size; struct brcmf_chip *ci; u32 coreid; - u32 generic_corerev; struct brcmf_pcie_shared_info shared; - void (*ringbell)(struct brcmf_pciedev_info *devinfo); wait_queue_head_t mbdata_resp_wait; bool mbdata_completed; bool irq_allocated; @@ -746,68 +741,22 @@ static void brcmf_pcie_bus_console_read(struct brcmf_pciedev_info *devinfo) } -static __used void brcmf_pcie_ringbell_v1(struct brcmf_pciedev_info *devinfo) -{ - u32 reg_value; - - brcmf_dbg(PCIE, "RING !\n"); - reg_value = brcmf_pcie_read_reg32(devinfo, - BRCMF_PCIE_PCIE2REG_MAILBOXINT); - reg_value |= BRCMF_PCIE2_INTB; - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, - reg_value); -} - - -static void brcmf_pcie_ringbell_v2(struct brcmf_pciedev_info *devinfo) -{ - brcmf_dbg(PCIE, "RING !\n"); - /* Any arbitrary value will do, lets use 1 */ - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); -} - - static void brcmf_pcie_intr_disable(struct brcmf_pciedev_info *devinfo) { - if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) - pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, - 0); - else - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, - 0); + brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, 0); } static void brcmf_pcie_intr_enable(struct brcmf_pciedev_info *devinfo) { - if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) - pci_write_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTMASK, - BRCMF_PCIE_INT_DEF); - else - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, - BRCMF_PCIE_MB_INT_D2H_DB | - BRCMF_PCIE_MB_INT_FN0_0 | - BRCMF_PCIE_MB_INT_FN0_1); + brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXMASK, + BRCMF_PCIE_MB_INT_D2H_DB | + BRCMF_PCIE_MB_INT_FN0_0 | + BRCMF_PCIE_MB_INT_FN0_1); } -static irqreturn_t brcmf_pcie_quick_check_isr_v1(int irq, void *arg) -{ - struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; - u32 status; - - status = 0; - pci_read_config_dword(devinfo->pdev, BRCMF_PCIE_REG_INTSTATUS, &status); - if (status) { - brcmf_pcie_intr_disable(devinfo); - brcmf_dbg(PCIE, "Enter\n"); - return IRQ_WAKE_THREAD; - } - return IRQ_NONE; -} - - -static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg) +static irqreturn_t brcmf_pcie_quick_check_isr(int irq, void *arg) { struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; @@ -820,29 +769,7 @@ static irqreturn_t brcmf_pcie_quick_check_isr_v2(int irq, void *arg) } -static irqreturn_t brcmf_pcie_isr_thread_v1(int irq, void *arg) -{ - struct brcmf_pciedev_info 
*devinfo = (struct brcmf_pciedev_info *)arg; - const struct pci_dev *pdev = devinfo->pdev; - u32 status; - - devinfo->in_irq = true; - status = 0; - pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status); - brcmf_dbg(PCIE, "Enter %x\n", status); - if (status) { - pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status); - if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) - brcmf_proto_msgbuf_rx_trigger(&devinfo->pdev->dev); - } - if (devinfo->state == BRCMFMAC_PCIE_STATE_UP) - brcmf_pcie_intr_enable(devinfo); - devinfo->in_irq = false; - return IRQ_HANDLED; -} - - -static irqreturn_t brcmf_pcie_isr_thread_v2(int irq, void *arg) +static irqreturn_t brcmf_pcie_isr_thread(int irq, void *arg) { struct brcmf_pciedev_info *devinfo = (struct brcmf_pciedev_info *)arg; u32 status; @@ -879,28 +806,14 @@ static int brcmf_pcie_request_irq(struct brcmf_pciedev_info *devinfo) brcmf_pcie_intr_disable(devinfo); brcmf_dbg(PCIE, "Enter\n"); - /* is it a v1 or v2 implementation */ + pci_enable_msi(pdev); - if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { - if (request_threaded_irq(pdev->irq, - brcmf_pcie_quick_check_isr_v1, - brcmf_pcie_isr_thread_v1, - IRQF_SHARED, "brcmf_pcie_intr", - devinfo)) { - pci_disable_msi(pdev); - brcmf_err("Failed to request IRQ %d\n", pdev->irq); - return -EIO; - } - } else { - if (request_threaded_irq(pdev->irq, - brcmf_pcie_quick_check_isr_v2, - brcmf_pcie_isr_thread_v2, - IRQF_SHARED, "brcmf_pcie_intr", - devinfo)) { - pci_disable_msi(pdev); - brcmf_err("Failed to request IRQ %d\n", pdev->irq); - return -EIO; - } + if (request_threaded_irq(pdev->irq, brcmf_pcie_quick_check_isr, + brcmf_pcie_isr_thread, IRQF_SHARED, + "brcmf_pcie_intr", devinfo)) { + pci_disable_msi(pdev); + brcmf_err("Failed to request IRQ %d\n", pdev->irq); + return -EIO; } devinfo->irq_allocated = true; return 0; @@ -931,16 +844,9 @@ static void brcmf_pcie_release_irq(struct brcmf_pciedev_info *devinfo) if (devinfo->in_irq) brcmf_err("Still in IRQ (processing) !!!\n"); - if (devinfo->generic_corerev == BRCMF_PCIE_GENREV1) { - status = 0; - pci_read_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, &status); - pci_write_config_dword(pdev, BRCMF_PCIE_REG_INTSTATUS, status); - } else { - status = brcmf_pcie_read_reg32(devinfo, - BRCMF_PCIE_PCIE2REG_MAILBOXINT); - brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, - status); - } + status = brcmf_pcie_read_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT); + brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_MAILBOXINT, status); + devinfo->irq_allocated = false; } @@ -989,7 +895,9 @@ static int brcmf_pcie_ring_mb_ring_bell(void *ctx) if (devinfo->state != BRCMFMAC_PCIE_STATE_UP) return -EIO; - devinfo->ringbell(devinfo); + brcmf_dbg(PCIE, "RING !\n"); + /* Any arbitrary value will do, lets use 1 */ + brcmf_pcie_write_reg32(devinfo, BRCMF_PCIE_PCIE2REG_H2D_MAILBOX, 1); return 0; } @@ -1503,9 +1411,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo, u32 address; u32 resetintr; - devinfo->ringbell = brcmf_pcie_ringbell_v2; - devinfo->generic_corerev = BRCMF_PCIE_GENREV2; - brcmf_dbg(PCIE, "Halt ARM.\n"); err = brcmf_pcie_enter_download_state(devinfo); if (err) -- cgit v1.2.3 From e9217b4b62a7296b5c008fd607579dbaf40bd754 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:01 +0100 Subject: brcmfmac: increase timeout for tx eapol When keys get set and updated this has to happen after eapol got transmitted (without key or old key) before the key can be updated. 
To make sure the order of sending eapol and configuring key is done correctly a timeout for tx of eapol is applied. This timeout is set to 50 msec, which is not always enough. Especially in AP mode and key updates the timeout may need to be much longer because client(s) can be in powersave. Increase the timeout from 50 to 950 msec. Reviewed-by: Arend Van Spriel Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index dcf9393ad8ff..3087d10f85b2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -42,7 +42,7 @@ MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); MODULE_LICENSE("Dual BSD/GPL"); -#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(50) +#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950) /* AMPDU rx reordering definitions */ #define BRCMF_RXREORDER_FLOWID_OFFSET 0 -- cgit v1.2.3 From d84d99e007770b4dd9b9e8be99da043c6555e274 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:02 +0100 Subject: brcmfmac: move module init and exit to common In preparation of module parameters for all devices the module init and exit routines are moved to the common file. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/common.c | 36 +++++++++++++++++++++- .../wireless/broadcom/brcm80211/brcmfmac/common.h | 1 - .../wireless/broadcom/brcm80211/brcmfmac/core.c | 15 ++------- .../wireless/broadcom/brcm80211/brcmfmac/core.h | 2 ++ 4 files changed, 39 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 8199862c7cdb..b8dc68db708f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -28,6 +28,10 @@ #include "tracepoint.h" #include "common.h" +MODULE_AUTHOR("Broadcom Corporation"); +MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); +MODULE_LICENSE("Dual BSD/GPL"); + const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; #define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40 @@ -221,7 +225,7 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) 
} #endif -void brcmf_mp_attach(void) +static void brcmf_mp_attach(void) { strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path, BRCMF_FW_ALTPATH_LEN); @@ -249,3 +253,33 @@ void brcmf_mp_device_detach(struct brcmf_pub *drvr) kfree(drvr->settings); } +static int __init brcmfmac_module_init(void) +{ + int err; + + /* Initialize debug system first */ + brcmf_debugfs_init(); + +#ifdef CONFIG_BRCMFMAC_SDIO + brcmf_sdio_init(); +#endif + /* Initialize global module paramaters */ + brcmf_mp_attach(); + + /* Continue the initialization by registering the different busses */ + err = brcmf_core_init(); + if (err) + brcmf_debugfs_exit(); + + return err; +} + +static void __exit brcmfmac_module_exit(void) +{ + brcmf_core_exit(); + brcmf_debugfs_exit(); +} + +module_init(brcmfmac_module_init); +module_exit(brcmfmac_module_exit); + diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index b678b2d60749..256be1b0c41b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -89,7 +89,6 @@ struct brcmf_mp_device { struct cc_translate *country_codes; }; -void brcmf_mp_attach(void); int brcmf_mp_device_attach(struct brcmf_pub *drvr); void brcmf_mp_device_detach(struct brcmf_pub *drvr); #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 3087d10f85b2..7b24e2b71a43 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -38,10 +38,6 @@ #include "pcie.h" #include "common.h" -MODULE_AUTHOR("Broadcom Corporation"); -MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); -MODULE_LICENSE("Dual BSD/GPL"); - #define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950) /* AMPDU rx reordering definitions */ @@ -1422,19 +1418,15 @@ static void brcmf_driver_register(struct work_struct *work) } static DECLARE_WORK(brcmf_driver_work, brcmf_driver_register); -static int __init brcmfmac_module_init(void) +int __init brcmf_core_init(void) { - brcmf_debugfs_init(); -#ifdef CONFIG_BRCMFMAC_SDIO - brcmf_sdio_init(); -#endif if (!schedule_work(&brcmf_driver_work)) return -EBUSY; return 0; } -static void __exit brcmfmac_module_exit(void) +void __exit brcmf_core_exit(void) { cancel_work_sync(&brcmf_driver_work); @@ -1447,8 +1439,5 @@ static void __exit brcmfmac_module_exit(void) #ifdef CONFIG_BRCMFMAC_PCIE brcmf_pcie_exit(); #endif - brcmf_debugfs_exit(); } -module_init(brcmfmac_module_init); -module_exit(brcmfmac_module_exit); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h index 500dac6e1f82..7bdb6fef99c3 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h @@ -227,5 +227,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp, void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success); void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb); void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on); +int __init brcmf_core_init(void); +void __exit brcmf_core_exit(void); #endif /* BRCMFMAC_CORE_H */ -- cgit v1.2.3 From 5c22fb85102a751e5a305d8fd13a1856a725bf01 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:03 +0100 Subject: brcmfmac: add wowl gtk rekeying offload support This patch adds support 
for gtk rekeying offload and for gtk rekeying failure during wowl mode. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 65 ++++++++++++++++++---- .../broadcom/brcm80211/brcmfmac/cfg80211.h | 2 + .../wireless/broadcom/brcm80211/brcmfmac/feature.c | 12 ++++ .../wireless/broadcom/brcm80211/brcmfmac/feature.h | 6 +- .../broadcom/brcm80211/brcmfmac/fwil_types.h | 21 ++++++- 5 files changed, 93 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 71f1fdf2e7e8..6d849bb2e27c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3526,6 +3526,10 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) else wakeup_data.net_detect = cfg->wowl.nd_info; } + if (wakeind & BRCMF_WOWL_GTK_FAILURE) { + brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_GTK_FAILURE\n"); + wakeup_data.gtk_rekey_failure = true; + } } else { wakeup = NULL; } @@ -3607,6 +3611,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, brcmf_fweh_register(cfg->pub, BRCMF_E_PFN_NET_FOUND, brcmf_wowl_nd_results); } + if (wowl->gtk_rekey_failure) + wowl_config |= BRCMF_WOWL_GTK_FAILURE; if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state)) wowl_config |= BRCMF_WOWL_UNASSOC; @@ -4874,7 +4880,32 @@ static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy, return ret; } -static struct cfg80211_ops wl_cfg80211_ops = { +#ifdef CONFIG_PM +static int +brcmf_cfg80211_set_rekey_data(struct wiphy *wiphy, struct net_device *ndev, + struct cfg80211_gtk_rekey_data *gtk) +{ + struct brcmf_if *ifp = netdev_priv(ndev); + struct brcmf_gtk_keyinfo_le gtk_le; + int ret; + + brcmf_dbg(TRACE, "Enter, bssidx=%d\n", ifp->bsscfgidx); + + memcpy(gtk_le.kck, gtk->kck, sizeof(gtk_le.kck)); + memcpy(gtk_le.kek, gtk->kek, sizeof(gtk_le.kek)); + memcpy(gtk_le.replay_counter, gtk->replay_ctr, + sizeof(gtk_le.replay_counter)); + + ret = brcmf_fil_iovar_data_set(ifp, "gtk_key_info", >k_le, + sizeof(gtk_le)); + if (ret < 0) + brcmf_err("gtk_key_info iovar failed: ret=%d\n", ret); + + return ret; +} +#endif + +static struct cfg80211_ops brcmf_cfg80211_ops = { .add_virtual_intf = brcmf_cfg80211_add_iface, .del_virtual_intf = brcmf_cfg80211_del_iface, .change_virtual_intf = brcmf_cfg80211_change_iface, @@ -6139,19 +6170,18 @@ static void brcmf_wiphy_wowl_params(struct wiphy *wiphy, struct brcmf_if *ifp) { #ifdef CONFIG_PM struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy); - s32 err; - u32 wowl_cap; if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) { - err = brcmf_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap); - if (!err) { - if (wowl_cap & BRCMF_WOWL_PFN_FOUND) { - brcmf_wowlan_support.flags |= - WIPHY_WOWLAN_NET_DETECT; - init_waitqueue_head(&cfg->wowl.nd_data_wait); - } + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ND)) { + brcmf_wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + init_waitqueue_head(&cfg->wowl.nd_data_wait); } } + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) { + brcmf_wowlan_support.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY; + brcmf_wowlan_support.flags |= WIPHY_WOWLAN_GTK_REKEY_FAILURE; + } + wiphy->wowlan = &brcmf_wowlan_support; #endif } @@ -6538,6 +6568,7 @@ struct 
brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, struct net_device *ndev = brcmf_get_ifp(drvr, 0)->ndev; struct brcmf_cfg80211_info *cfg; struct wiphy *wiphy; + struct cfg80211_ops *ops; struct brcmf_cfg80211_vif *vif; struct brcmf_if *ifp; s32 err = 0; @@ -6549,8 +6580,17 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, return NULL; } + ops = kzalloc(sizeof(*ops), GFP_KERNEL); + if (!ops) + return NULL; + + memcpy(ops, &brcmf_cfg80211_ops, sizeof(*ops)); ifp = netdev_priv(ndev); - wiphy = wiphy_new(&wl_cfg80211_ops, sizeof(struct brcmf_cfg80211_info)); +#ifdef CONFIG_PM + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_GTK)) + ops->set_rekey_data = brcmf_cfg80211_set_rekey_data; +#endif + wiphy = wiphy_new(ops, sizeof(struct brcmf_cfg80211_info)); if (!wiphy) { brcmf_err("Could not allocate wiphy device\n"); return NULL; @@ -6560,6 +6600,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, cfg = wiphy_priv(wiphy); cfg->wiphy = wiphy; + cfg->ops = ops; cfg->pub = drvr; init_vif_event(&cfg->vif_event); INIT_LIST_HEAD(&cfg->vif_list); @@ -6686,6 +6727,7 @@ priv_out: ifp->vif = NULL; wiphy_out: brcmf_free_wiphy(wiphy); + kfree(ops); return NULL; } @@ -6696,6 +6738,7 @@ void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg) brcmf_btcoex_detach(cfg); wiphy_unregister(cfg->wiphy); + kfree(cfg->ops); wl_deinit_priv(cfg); brcmf_free_wiphy(cfg->wiphy); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index 01f096f7a089..e4c1d4364684 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -256,6 +256,7 @@ struct brcmf_cfg80211_wowl { * struct brcmf_cfg80211_info - dongle private data of cfg80211 interface * * @wiphy: wiphy object for cfg80211 interface. + * @ops: pointer to copy of ops as registered with wiphy object. * @conf: dongle configuration. * @p2p: peer-to-peer specific information. * @btcoex: Bluetooth coexistence information. 
@@ -288,6 +289,7 @@ struct brcmf_cfg80211_wowl { */ struct brcmf_cfg80211_info { struct wiphy *wiphy; + struct cfg80211_ops *ops; struct brcmf_cfg80211_conf *conf; struct brcmf_p2p_info p2p; struct brcmf_btcoex_info *btcoex; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 1ffa95f1b8d2..098732a0c91b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -136,6 +136,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) { struct brcmf_if *ifp = brcmf_get_ifp(drvr, 0); struct brcmf_pno_macaddr_le pfn_mac; + u32 wowl_cap; s32 err; brcmf_feat_firmware_capabilities(ifp); @@ -143,6 +144,17 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_PNO, "pfn"); if (drvr->bus_if->wowl_supported) brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_WOWL, "wowl"); + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL)) { + err = brcmf_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap); + if (!err) { + if (wowl_cap & BRCMF_WOWL_PFN_FOUND) + ifp->drvr->feat_flags |= + BIT(BRCMF_FEAT_WOWL_ND); + if (wowl_cap & BRCMF_WOWL_GTK_FAILURE) + ifp->drvr->feat_flags |= + BIT(BRCMF_FEAT_WOWL_GTK); + } + } /* MBSS does not work for 43362 */ if (drvr->bus_if->chip == BRCM_CC_43362_CHIP_ID) ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_MBSS); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index 2e2479d41337..f940c29bcfaf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -27,6 +27,8 @@ * RSDB: Real Simultaneous Dual Band * TDLS: Tunneled Direct Link Setup * SCAN_RANDOM_MAC: Random MAC during (net detect) scheduled scan. + * WOWL_ND: WOWL net detect (PNO) + * WOWL_GTK: (WOWL) GTK rekeying offload */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -36,7 +38,9 @@ BRCMF_FEAT_DEF(P2P) \ BRCMF_FEAT_DEF(RSDB) \ BRCMF_FEAT_DEF(TDLS) \ - BRCMF_FEAT_DEF(SCAN_RANDOM_MAC) + BRCMF_FEAT_DEF(SCAN_RANDOM_MAC) \ + BRCMF_FEAT_DEF(WOWL_ND) \ + BRCMF_FEAT_DEF(WOWL_GTK) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index e9e177dad1a6..6d41ae392a8f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -111,7 +111,9 @@ /* Wakeup if received matched secured pattern: */ #define BRCMF_WOWL_SECURE (1 << 25) /* Wakeup on finding preferred network */ -#define BRCMF_WOWL_PFN_FOUND (1 << 26) +#define BRCMF_WOWL_PFN_FOUND (1 << 27) +/* Wakeup on receiving pairwise key EAP packets: */ +#define WIPHY_WOWL_EAP_PK (1 << 28) /* Link Down indication in WoWL mode: */ #define BRCMF_WOWL_LINKDOWN (1 << 31) @@ -136,6 +138,10 @@ #define BRCMF_MCSSET_LEN 16 +#define BRCMF_RSN_KCK_LENGTH 16 +#define BRCMF_RSN_KEK_LENGTH 16 +#define BRCMF_RSN_REPLAY_LEN 8 + /* join preference types for join_pref iovar */ enum brcmf_join_pref_types { BRCMF_JOIN_PREF_RSSI = 1, @@ -789,4 +795,17 @@ struct brcmf_pktcnt_le { __le32 rx_ocast_good_pkt; }; +/** + * struct brcmf_gtk_keyinfo_le - GTP rekey data + * + * @kck: key confirmation key. + * @kek: key encryption key. + * @replay_counter: replay counter. 
+ */ +struct brcmf_gtk_keyinfo_le { + u8 kck[BRCMF_RSN_KCK_LENGTH]; + u8 kek[BRCMF_RSN_KEK_LENGTH]; + u8 replay_counter[BRCMF_RSN_REPLAY_LEN]; +}; + #endif /* FWIL_TYPES_H_ */ -- cgit v1.2.3 From 8ea56be0869f8230ed7b2779397225fe45080dd1 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:04 +0100 Subject: brcmfmac: move platform data retrieval code to common In preparation of module parameters for all devices the module platform data retrieval is moved from sdio to common. It is still only used for sdio devices. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 84 +++++----------------- .../wireless/broadcom/brcm80211/brcmfmac/common.c | 53 ++++++++++++-- .../wireless/broadcom/brcm80211/brcmfmac/common.h | 3 + .../net/wireless/broadcom/brcm80211/brcmfmac/of.c | 23 +++--- .../net/wireless/broadcom/brcm80211/brcmfmac/of.h | 6 +- 5 files changed, 84 insertions(+), 85 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index b98db8a0a069..25cd71229c95 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -27,8 +27,6 @@ #include #include #include -#include -#include #include #include #include @@ -46,7 +44,6 @@ #include "bus.h" #include "debug.h" #include "sdio.h" -#include "of.h" #include "core.h" #include "common.h" @@ -106,18 +103,18 @@ static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func) int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) { + struct brcmfmac_sdio_platform_data *pdata; int ret = 0; u8 data; u32 addr, gpiocontrol; unsigned long flags; - if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) { + pdata = sdiodev->pdata; + if ((pdata) && (pdata->oob_irq_supported)) { brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n", - sdiodev->pdata->oob_irq_nr); - ret = request_irq(sdiodev->pdata->oob_irq_nr, - brcmf_sdiod_oob_irqhandler, - sdiodev->pdata->oob_irq_flags, - "brcmf_oob_intr", + pdata->oob_irq_nr); + ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler, + pdata->oob_irq_flags, "brcmf_oob_intr", &sdiodev->func[1]->dev); if (ret != 0) { brcmf_err("request_irq failed %d\n", ret); @@ -129,7 +126,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) sdiodev->irq_en = true; spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags); - ret = enable_irq_wake(sdiodev->pdata->oob_irq_nr); + ret = enable_irq_wake(pdata->oob_irq_nr); if (ret != 0) { brcmf_err("enable_irq_wake failed %d\n", ret); return ret; @@ -158,7 +155,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) /* redirect, configure and enable io for interrupt signal */ data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; - if (sdiodev->pdata->oob_irq_flags & IRQF_TRIGGER_HIGH) + if (pdata->oob_irq_flags & IRQF_TRIGGER_HIGH) data |= SDIO_SEPINT_ACT_HI; brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); @@ -176,9 +173,12 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) { + struct brcmfmac_sdio_platform_data *pdata; + brcmf_dbg(SDIO, "Entering\n"); - if ((sdiodev->pdata) && (sdiodev->pdata->oob_irq_supported)) { + pdata = sdiodev->pdata; + if ((pdata) && (pdata->oob_irq_supported)) { 
sdio_claim_host(sdiodev->func[1]); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); @@ -187,11 +187,10 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) if (sdiodev->oob_irq_requested) { sdiodev->oob_irq_requested = false; if (sdiodev->irq_wake) { - disable_irq_wake(sdiodev->pdata->oob_irq_nr); + disable_irq_wake(pdata->oob_irq_nr); sdiodev->irq_wake = false; } - free_irq(sdiodev->pdata->oob_irq_nr, - &sdiodev->func[1]->dev); + free_irq(pdata->oob_irq_nr, &sdiodev->func[1]->dev); sdiodev->irq_en = false; } } else { @@ -1103,8 +1102,6 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); -static struct brcmfmac_sdio_platform_data *brcmfmac_sdio_pdata; - static void brcmf_sdiod_acpi_set_power_manageable(struct device *dev, int val) @@ -1167,10 +1164,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, dev_set_drvdata(&func->dev, bus_if); dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); sdiodev->dev = &sdiodev->func[1]->dev; - sdiodev->pdata = brcmfmac_sdio_pdata; - - if (!sdiodev->pdata) - brcmf_of_probe(sdiodev); + sdiodev->pdata = brcmf_get_module_param(sdiodev->dev); #ifdef CONFIG_PM_SLEEP /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ @@ -1296,7 +1290,7 @@ static const struct dev_pm_ops brcmf_sdio_pm_ops = { static struct sdio_driver brcmf_sdmmc_driver = { .probe = brcmf_ops_sdio_probe, .remove = brcmf_ops_sdio_remove, - .name = BRCMFMAC_SDIO_PDATA_NAME, + .name = KBUILD_MODNAME, .id_table = brcmf_sdmmc_ids, .drv = { .owner = THIS_MODULE, @@ -1306,37 +1300,6 @@ static struct sdio_driver brcmf_sdmmc_driver = { }, }; -static int __init brcmf_sdio_pd_probe(struct platform_device *pdev) -{ - brcmf_dbg(SDIO, "Enter\n"); - - brcmfmac_sdio_pdata = dev_get_platdata(&pdev->dev); - - if (brcmfmac_sdio_pdata->power_on) - brcmfmac_sdio_pdata->power_on(); - - return 0; -} - -static int brcmf_sdio_pd_remove(struct platform_device *pdev) -{ - brcmf_dbg(SDIO, "Enter\n"); - - if (brcmfmac_sdio_pdata->power_off) - brcmfmac_sdio_pdata->power_off(); - - sdio_unregister_driver(&brcmf_sdmmc_driver); - - return 0; -} - -static struct platform_driver brcmf_sdio_pd = { - .remove = brcmf_sdio_pd_remove, - .driver = { - .name = BRCMFMAC_SDIO_PDATA_NAME, - } -}; - void brcmf_sdio_register(void) { int ret; @@ -1350,19 +1313,6 @@ void brcmf_sdio_exit(void) { brcmf_dbg(SDIO, "Enter\n"); - if (brcmfmac_sdio_pdata) - platform_driver_unregister(&brcmf_sdio_pd); - else - sdio_unregister_driver(&brcmf_sdmmc_driver); + sdio_unregister_driver(&brcmf_sdmmc_driver); } -void __init brcmf_sdio_init(void) -{ - int ret; - - brcmf_dbg(SDIO, "Enter\n"); - - ret = platform_driver_probe(&brcmf_sdio_pd, brcmf_sdio_pd_probe); - if (ret == -ENODEV) - brcmf_dbg(SDIO, "No platform data available.\n"); -} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index b8dc68db708f..020901c2e0ca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -27,6 +27,7 @@ #include "fwil_types.h" #include "tracepoint.h" #include "common.h" +#include "of.h" MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); @@ -79,6 +80,7 @@ module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0); MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); #endif +static 
struct brcmfmac_sdio_platform_data *brcmfmac_pdata; struct brcmf_mp_global_t brcmf_mp_global; int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) @@ -231,6 +233,13 @@ static void brcmf_mp_attach(void) BRCMF_FW_ALTPATH_LEN); } +struct brcmfmac_sdio_platform_data *brcmf_get_module_param(struct device *dev) +{ + if (!brcmfmac_pdata) + brcmf_of_probe(dev, &brcmfmac_pdata); + return brcmfmac_pdata; +} + int brcmf_mp_device_attach(struct brcmf_pub *drvr) { drvr->settings = kzalloc(sizeof(*drvr->settings), GFP_ATOMIC); @@ -253,6 +262,35 @@ void brcmf_mp_device_detach(struct brcmf_pub *drvr) kfree(drvr->settings); } +static int __init brcmf_common_pd_probe(struct platform_device *pdev) +{ + brcmf_dbg(INFO, "Enter\n"); + + brcmfmac_pdata = dev_get_platdata(&pdev->dev); + + if (brcmfmac_pdata->power_on) + brcmfmac_pdata->power_on(); + + return 0; +} + +static int brcmf_common_pd_remove(struct platform_device *pdev) +{ + brcmf_dbg(INFO, "Enter\n"); + + if (brcmfmac_pdata->power_off) + brcmfmac_pdata->power_off(); + + return 0; +} + +static struct platform_driver brcmf_pd = { + .remove = brcmf_common_pd_remove, + .driver = { + .name = BRCMFMAC_SDIO_PDATA_NAME, + } +}; + static int __init brcmfmac_module_init(void) { int err; @@ -260,16 +298,21 @@ static int __init brcmfmac_module_init(void) /* Initialize debug system first */ brcmf_debugfs_init(); -#ifdef CONFIG_BRCMFMAC_SDIO - brcmf_sdio_init(); -#endif + /* Get the platform data (if available) for our devices */ + err = platform_driver_probe(&brcmf_pd, brcmf_common_pd_probe); + if (err == -ENODEV) + brcmf_dbg(INFO, "No platform data available.\n"); + /* Initialize global module paramaters */ brcmf_mp_attach(); /* Continue the initialization by registering the different busses */ err = brcmf_core_init(); - if (err) + if (err) { brcmf_debugfs_exit(); + if (brcmfmac_pdata) + platform_driver_unregister(&brcmf_pd); + } return err; } @@ -277,6 +320,8 @@ static int __init brcmfmac_module_init(void) static void __exit brcmfmac_module_exit(void) { brcmf_core_exit(); + if (brcmfmac_pdata) + platform_driver_unregister(&brcmf_pd); brcmf_debugfs_exit(); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index 256be1b0c41b..54a26ede808d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -15,6 +15,8 @@ #ifndef BRCMFMAC_COMMON_H #define BRCMFMAC_COMMON_H +#include +#include #include "fwil_types.h" extern const u8 ALLFFMAC[ETH_ALEN]; @@ -89,6 +91,7 @@ struct brcmf_mp_device { struct cc_translate *country_codes; }; +struct brcmfmac_sdio_platform_data *brcmf_get_module_param(struct device *dev); int brcmf_mp_device_attach(struct brcmf_pub *drvr); void brcmf_mp_device_detach(struct brcmf_pub *drvr); #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 03f35e0c52ca..8201d937b826 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -16,17 +16,16 @@ #include #include #include -#include -#include -#include #include #include "debug.h" -#include "sdio.h" +#include "core.h" +#include "common.h" +#include "of.h" -void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) +void +brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_platform_data **sdio) { - struct device *dev = sdiodev->dev; struct device_node *np = dev->of_node; int irq; u32 irqf; @@ -35,12 
+34,12 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) return; - sdiodev->pdata = devm_kzalloc(dev, sizeof(*sdiodev->pdata), GFP_KERNEL); - if (!sdiodev->pdata) + *sdio = devm_kzalloc(dev, sizeof(*sdio), GFP_KERNEL); + if (!(*sdio)) return; if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) - sdiodev->pdata->drive_strength = val; + (*sdio)->drive_strength = val; /* make sure there are interrupts defined in the node */ if (!of_find_property(np, "interrupts", NULL)) @@ -53,7 +52,7 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) } irqf = irqd_get_trigger_type(irq_get_irq_data(irq)); - sdiodev->pdata->oob_irq_supported = true; - sdiodev->pdata->oob_irq_nr = irq; - sdiodev->pdata->oob_irq_flags = irqf; + (*sdio)->oob_irq_supported = true; + (*sdio)->oob_irq_nr = irq; + (*sdio)->oob_irq_flags = irqf; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h index 5f7c3550deda..84b474484cea 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h @@ -14,9 +14,11 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #ifdef CONFIG_OF -void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev); +void +brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_platform_data **sdio); #else -static void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev) +static void brcmf_of_probe(struct device *dev, + struct brcmfmac_sdio_platform_data **sdio) { } #endif /* CONFIG_OF */ -- cgit v1.2.3 From 73ef9e640e94ab6205c0bb92dc8bac53a40d952e Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:05 +0100 Subject: brcmfmac: keep ARP and ND offload enabled during WOWL Currently ARP and ND (IPv6 Neigbor Discovery) offload get disabled on entering suspend. However when firmwares support the wowl_cap iovar then these offload routines can be kept enabled as they will work during WOWL as well. 
Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 6 ++++-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 1 + drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4 +++- 3 files changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 6d849bb2e27c..5609a79df1c1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -3556,7 +3556,8 @@ static s32 brcmf_cfg80211_resume(struct wiphy *wiphy) brcmf_report_wowl_wakeind(wiphy, ifp); brcmf_fil_iovar_int_set(ifp, "wowl_clear", 0); brcmf_config_wowl_pattern(ifp, "clr", NULL, 0, NULL, 0); - brcmf_configure_arp_nd_offload(ifp, true); + if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ARP_ND)) + brcmf_configure_arp_nd_offload(ifp, true); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, cfg->wowl.pre_pmmode); cfg->wowl.active = false; @@ -3580,7 +3581,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg, brcmf_dbg(TRACE, "Suspend, wowl config.\n"); - brcmf_configure_arp_nd_offload(ifp, false); + if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL_ARP_ND)) + brcmf_configure_arp_nd_offload(ifp, false); brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_PM, &cfg->wowl.pre_pmmode); brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_PM, PM_MAX); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index 098732a0c91b..bfa19bb632b7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -147,6 +147,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_WOWL)) { err = brcmf_fil_iovar_int_get(ifp, "wowl_cap", &wowl_cap); if (!err) { + ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_WOWL_ARP_ND); if (wowl_cap & BRCMF_WOWL_PFN_FOUND) ifp->drvr->feat_flags |= BIT(BRCMF_FEAT_WOWL_ND); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index f940c29bcfaf..a3efa35a5c21 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -29,6 +29,7 @@ * SCAN_RANDOM_MAC: Random MAC during (net detect) scheduled scan. * WOWL_ND: WOWL net detect (PNO) * WOWL_GTK: (WOWL) GTK rekeying offload + * WOWL_ARP_ND: ARP and Neighbor Discovery offload support during WOWL. */ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -40,7 +41,8 @@ BRCMF_FEAT_DEF(TDLS) \ BRCMF_FEAT_DEF(SCAN_RANDOM_MAC) \ BRCMF_FEAT_DEF(WOWL_ND) \ - BRCMF_FEAT_DEF(WOWL_GTK) + BRCMF_FEAT_DEF(WOWL_GTK) \ + BRCMF_FEAT_DEF(WOWL_ARP_ND) /* * Quirks: -- cgit v1.2.3 From 4d7928959832ea41f7f91456b76da19cad01bd09 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:07 +0100 Subject: brcmfmac: switch to new platform data Platform data is only available for sdio. With this patch a new platform data structure is being used which allows for platform data for any device and configurable per device. This patch only switches to the new structure and adds support for SDIO devices. 
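As a minimal sketch of how a board file could supply the new per-device platform data added by this patch in include/linux/platform_data/brcmfmac.h (not part of the patch itself; the chip id, revision and drive strength below are illustrative values only):

#include <linux/platform_device.h>
#include <linux/platform_data/brcmfmac.h>

/* Hypothetical board file: one SDIO device entry, example values only. */
static struct brcmfmac_platform_data brcmfmac_pdata = {
	.power_on	= NULL,		/* optional board specific power-up hook */
	.power_off	= NULL,
	.device_count	= 1,
	.devices	= {
		{
			.id		= 0x4329,	/* example chip id */
			.rev		= -1,		/* -1 matches any chip revision */
			.bus_type	= BRCMF_BUSTYPE_SDIO,
			.bus.sdio = {
				.drive_strength	= 6,	/* example drive strength */
			},
		},
	},
};

static struct platform_device brcmfmac_device = {
	.name			= BRCMFMAC_PDATA_NAME,
	.id			= PLATFORM_DEVID_NONE,
	.dev.platform_data	= &brcmfmac_pdata,
};

The device would be registered with platform_device_register(&brcmfmac_device); as the header comment added by this patch notes, that registration must happen from an initcall earlier than device_initcall when brcmfmac is built into the kernel, otherwise the platform data is not picked up.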
Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 15 +- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 4 +- .../wireless/broadcom/brcm80211/brcmfmac/common.c | 43 ++++- .../wireless/broadcom/brcm80211/brcmfmac/common.h | 35 +--- .../net/wireless/broadcom/brcm80211/brcmfmac/of.c | 3 +- .../net/wireless/broadcom/brcm80211/brcmfmac/of.h | 5 +- .../wireless/broadcom/brcm80211/brcmfmac/sdio.c | 77 +++++---- .../wireless/broadcom/brcm80211/brcmfmac/sdio.h | 2 +- include/linux/platform_data/brcmfmac-sdio.h | 135 --------------- include/linux/platform_data/brcmfmac.h | 185 +++++++++++++++++++++ 10 files changed, 282 insertions(+), 222 deletions(-) delete mode 100644 include/linux/platform_data/brcmfmac-sdio.h create mode 100644 include/linux/platform_data/brcmfmac.h (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 25cd71229c95..bb4aece9ad2c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -103,7 +103,7 @@ static void brcmf_sdiod_dummy_irqhandler(struct sdio_func *func) int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) { - struct brcmfmac_sdio_platform_data *pdata; + struct brcmfmac_sdio_pd *pdata; int ret = 0; u8 data; u32 addr, gpiocontrol; @@ -173,7 +173,7 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) { - struct brcmfmac_sdio_platform_data *pdata; + struct brcmfmac_sdio_pd *pdata; brcmf_dbg(SDIO, "Entering\n"); @@ -1164,17 +1164,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, dev_set_drvdata(&func->dev, bus_if); dev_set_drvdata(&sdiodev->func[1]->dev, bus_if); sdiodev->dev = &sdiodev->func[1]->dev; - sdiodev->pdata = brcmf_get_module_param(sdiodev->dev); - -#ifdef CONFIG_PM_SLEEP - /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ - * is true or when platform data OOB irq is true). 
- */ - if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) && - ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) || - (sdiodev->pdata && sdiodev->pdata->oob_irq_supported))) - bus_if->wowl_supported = true; -#endif brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_DOWN); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5609a79df1c1..5e3acaca7231 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -6459,8 +6459,8 @@ int brcmf_cfg80211_wait_vif_event(struct brcmf_cfg80211_info *cfg, static s32 brcmf_translate_country_code(struct brcmf_pub *drvr, char alpha2[2], struct brcmf_fil_country_le *ccreq) { - struct cc_translate *country_codes; - struct cc_entry *cc; + struct brcmfmac_pd_cc *country_codes; + struct brcmfmac_pd_cc_entry *cc; s32 found_index; int i; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 020901c2e0ca..4bd3225cdea6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -80,7 +80,7 @@ module_param_named(ignore_probe_fail, brcmf_ignore_probe_fail, int, 0); MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging"); #endif -static struct brcmfmac_sdio_platform_data *brcmfmac_pdata; +static struct brcmfmac_platform_data *brcmfmac_pdata; struct brcmf_mp_global_t brcmf_mp_global; int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) @@ -229,15 +229,46 @@ void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...) static void brcmf_mp_attach(void) { + /* If module param firmware path is set then this will always be used, + * if not set then if available use the platform data version. 
To make + * sure it gets initialized at all, always copy the module param version + */ strlcpy(brcmf_mp_global.firmware_path, brcmf_firmware_path, BRCMF_FW_ALTPATH_LEN); + if ((brcmfmac_pdata) && (brcmfmac_pdata->fw_alternative_path) && + (brcmf_mp_global.firmware_path[0] == '\0')) { + strlcpy(brcmf_mp_global.firmware_path, + brcmfmac_pdata->fw_alternative_path, + BRCMF_FW_ALTPATH_LEN); + } } -struct brcmfmac_sdio_platform_data *brcmf_get_module_param(struct device *dev) +struct brcmfmac_sdio_pd *brcmf_get_module_param(struct device *dev, + enum brcmf_bus_type bus_type, + u32 chip, u32 chiprev) { - if (!brcmfmac_pdata) - brcmf_of_probe(dev, &brcmfmac_pdata); - return brcmfmac_pdata; + struct brcmfmac_sdio_pd *pdata; + struct brcmfmac_pd_device *device_pd; + int i; + + if (brcmfmac_pdata) { + for (i = 0; i < brcmfmac_pdata->device_count; i++) { + device_pd = &brcmfmac_pdata->devices[i]; + if ((device_pd->bus_type == bus_type) && + (device_pd->id == chip) && + ((device_pd->rev == chiprev) || + (device_pd->rev == -1))) { + brcmf_dbg(INFO, "Platform data for device found\n"); + if (device_pd->bus_type == BRCMF_BUSTYPE_SDIO) + return &device_pd->bus.sdio; + break; + } + } + } + pdata = NULL; + brcmf_of_probe(dev, &pdata); + + return pdata; } int brcmf_mp_device_attach(struct brcmf_pub *drvr) @@ -287,7 +318,7 @@ static int brcmf_common_pd_remove(struct platform_device *pdev) static struct platform_driver brcmf_pd = { .remove = brcmf_common_pd_remove, .driver = { - .name = BRCMFMAC_SDIO_PDATA_NAME, + .name = BRCMFMAC_PDATA_NAME, } }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index 54a26ede808d..a64e40e8bfda 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -16,7 +16,7 @@ #define BRCMFMAC_COMMON_H #include -#include +#include #include "fwil_types.h" extern const u8 ALLFFMAC[ETH_ALEN]; @@ -42,33 +42,6 @@ struct brcmf_mp_global_t { extern struct brcmf_mp_global_t brcmf_mp_global; -/** - * struct cc_entry - Struct for translating user space country code (iso3166) to - * firmware country code and revision. - * - * @iso3166: iso3166 alpha 2 country code string. - * @cc: firmware country code string. - * @rev: firmware country code revision. - */ -struct cc_entry { - char iso3166[BRCMF_COUNTRY_BUF_SZ]; - char cc[BRCMF_COUNTRY_BUF_SZ]; - s32 rev; -}; - -/** - * struct cc_translate - Struct for translating country codes as set by user - * space to a country code and rev which can be used by - * firmware. - * - * @table_size: number of entries in table (> 0) - * @table: dynamic array of 1 or more elements with translation information. - */ -struct cc_translate { - int table_size; - struct cc_entry table[0]; -}; - /** * struct brcmf_mp_device - Device module paramaters. 
* @@ -88,10 +61,12 @@ struct brcmf_mp_device { int fcmode; bool roamoff; bool ignore_probe_fail; - struct cc_translate *country_codes; + struct brcmfmac_pd_cc *country_codes; }; -struct brcmfmac_sdio_platform_data *brcmf_get_module_param(struct device *dev); +struct brcmfmac_sdio_pd *brcmf_get_module_param(struct device *dev, + enum brcmf_bus_type bus_type, + u32 chip, u32 chiprev); int brcmf_mp_device_attach(struct brcmf_pub *drvr); void brcmf_mp_device_detach(struct brcmf_pub *drvr); #ifdef DEBUG diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index 8201d937b826..ece0b65dd039 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -23,8 +23,7 @@ #include "common.h" #include "of.h" -void -brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_platform_data **sdio) +void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio) { struct device_node *np = dev->of_node; int irq; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h index 84b474484cea..1ba951f9b542 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h @@ -15,10 +15,9 @@ */ #ifdef CONFIG_OF void -brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_platform_data **sdio); +brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio); #else -static void brcmf_of_probe(struct device *dev, - struct brcmfmac_sdio_platform_data **sdio) +static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio) { } #endif /* CONFIG_OF */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index c790fa89db05..6e367041f691 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -33,8 +33,6 @@ #include #include #include -#include -#include #include #include #include @@ -44,6 +42,8 @@ #include "sdio.h" #include "chip.h" #include "firmware.h" +#include "core.h" +#include "common.h" #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) #define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) @@ -3775,26 +3775,28 @@ static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = { static bool brcmf_sdio_probe_attach(struct brcmf_sdio *bus) { + struct brcmf_sdio_dev *sdiodev; u8 clkctl = 0; int err = 0; int reg_addr; u32 reg_val; u32 drivestrength; - sdio_claim_host(bus->sdiodev->func[1]); + sdiodev = bus->sdiodev; + sdio_claim_host(sdiodev->func[1]); pr_debug("F1 signature read @0x18000000=0x%4x\n", - brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL)); + brcmf_sdiod_regrl(sdiodev, SI_ENUM_BASE, NULL)); /* * Force PLL off until brcmf_chip_attach() * programs PLL control regs */ - brcmf_sdiod_regwb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, + brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, BRCMF_INIT_CLKCTL1, &err); if (!err) - clkctl = brcmf_sdiod_regrb(bus->sdiodev, + clkctl = brcmf_sdiod_regrb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (err || ((clkctl & ~SBSDIO_AVBITS) != BRCMF_INIT_CLKCTL1)) { @@ -3803,50 +3805,77 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) goto fail; } - bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops); + bus->ci = brcmf_chip_attach(sdiodev, &brcmf_sdio_buscore_ops); if (IS_ERR(bus->ci)) { brcmf_err("brcmf_chip_attach failed!\n"); bus->ci = NULL; 
goto fail; } + sdiodev->pdata = brcmf_get_module_param(sdiodev->dev, + BRCMF_BUSTYPE_SDIO, + bus->ci->chip, + bus->ci->chiprev); + /* platform specific configuration: + * alignments must be at least 4 bytes for ADMA + */ + bus->head_align = ALIGNMENT; + bus->sgentry_align = ALIGNMENT; + if (sdiodev->pdata) { + if (sdiodev->pdata->sd_head_align > ALIGNMENT) + bus->head_align = sdiodev->pdata->sd_head_align; + if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT) + bus->sgentry_align = sdiodev->pdata->sd_sgentry_align; + } + /* allocate scatter-gather table. sg support + * will be disabled upon allocation failure. + */ + brcmf_sdiod_sgtable_alloc(sdiodev); + +#ifdef CONFIG_PM_SLEEP + /* wowl can be supported when KEEP_POWER is true and (WAKE_SDIO_IRQ + * is true or when platform data OOB irq is true). + */ + if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) && + ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) || + (sdiodev->pdata && sdiodev->pdata->oob_irq_supported))) + sdiodev->bus_if->wowl_supported = true; +#endif if (brcmf_sdio_kso_init(bus)) { brcmf_err("error enabling KSO\n"); goto fail; } - if ((bus->sdiodev->pdata) && (bus->sdiodev->pdata->drive_strength)) - drivestrength = bus->sdiodev->pdata->drive_strength; + if ((sdiodev->pdata) && (sdiodev->pdata->drive_strength)) + drivestrength = sdiodev->pdata->drive_strength; else drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; - brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength); + brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength); /* Set card control so an SDIO card reset does a WLAN backplane reset */ - reg_val = brcmf_sdiod_regrb(bus->sdiodev, - SDIO_CCCR_BRCM_CARDCTRL, &err); + reg_val = brcmf_sdiod_regrb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, &err); if (err) goto fail; reg_val |= SDIO_CCCR_BRCM_CARDCTRL_WLANRESET; - brcmf_sdiod_regwb(bus->sdiodev, - SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err); + brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_CARDCTRL, reg_val, &err); if (err) goto fail; /* set PMUControl so a backplane reset does PMU state reload */ reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol); - reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err); + reg_val = brcmf_sdiod_regrl(sdiodev, reg_addr, &err); if (err) goto fail; reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT); - brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err); + brcmf_sdiod_regwl(sdiodev, reg_addr, reg_val, &err); if (err) goto fail; - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(sdiodev->func[1]); brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN); @@ -3867,7 +3896,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) return true; fail: - sdio_release_host(bus->sdiodev->func[1]); + sdio_release_host(sdiodev->func[1]); return false; } @@ -4045,18 +4074,6 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) bus->txminmax = BRCMF_TXMINMAX; bus->tx_seq = SDPCM_SEQ_WRAP - 1; - /* platform specific configuration: - * alignments must be at least 4 bytes for ADMA - */ - bus->head_align = ALIGNMENT; - bus->sgentry_align = ALIGNMENT; - if (sdiodev->pdata) { - if (sdiodev->pdata->sd_head_align > ALIGNMENT) - bus->head_align = sdiodev->pdata->sd_head_align; - if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT) - bus->sgentry_align = sdiodev->pdata->sd_sgentry_align; - } - /* single-threaded workqueue */ wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM, dev_name(&sdiodev->func[1]->dev)); diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 23f223150cef..50df9cb21af2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -184,7 +184,7 @@ struct brcmf_sdio_dev { struct brcmf_sdio *bus; struct device *dev; struct brcmf_bus *bus_if; - struct brcmfmac_sdio_platform_data *pdata; + struct brcmfmac_sdio_pd *pdata; bool oob_irq_requested; bool irq_en; /* irq enable flags */ spinlock_t irq_en_lock; diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h deleted file mode 100644 index e75dcbf2b230..000000000000 --- a/include/linux/platform_data/brcmfmac-sdio.h +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (c) 2013 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _LINUX_BRCMFMAC_PLATFORM_H -#define _LINUX_BRCMFMAC_PLATFORM_H - -/* - * Platform specific driver functions and data. Through the platform specific - * device data functions can be provided to help the brcmfmac driver to - * operate with the device in combination with the used platform. - * - * Use the platform data in the following (similar) way: - * - * -#include - - -static void brcmfmac_power_on(void) -{ -} - -static void brcmfmac_power_off(void) -{ -} - -static void brcmfmac_reset(void) -{ -} - -static struct brcmfmac_sdio_platform_data brcmfmac_sdio_pdata = { - .power_on = brcmfmac_power_on, - .power_off = brcmfmac_power_off, - .reset = brcmfmac_reset -}; - -static struct platform_device brcmfmac_device = { - .name = BRCMFMAC_SDIO_PDATA_NAME, - .id = PLATFORM_DEVID_NONE, - .dev.platform_data = &brcmfmac_sdio_pdata -}; - -void __init brcmfmac_init_pdata(void) -{ - brcmfmac_sdio_pdata.oob_irq_supported = true; - brcmfmac_sdio_pdata.oob_irq_nr = gpio_to_irq(GPIO_BRCMF_SDIO_OOB); - brcmfmac_sdio_pdata.oob_irq_flags = IORESOURCE_IRQ | - IORESOURCE_IRQ_HIGHLEVEL; - platform_device_register(&brcmfmac_device); -} - * - * - * Note: the brcmfmac can be loaded as module or be statically built-in into - * the kernel. If built-in then do note that it uses module_init (and - * module_exit) routines which equal device_initcall. So if you intend to - * create a module with the platform specific data for the brcmfmac and have - * it built-in to the kernel then use a higher initcall then device_initcall - * (see init.h). If this is not done then brcmfmac will load without problems - * but will not pickup the platform data. - * - * When the driver does not "detect" platform driver data then it will continue - * without reporting anything and just assume there is no data needed. Which is - * probably true for most platforms. 
- * - * Explanation of the platform_data fields: - * - * drive_strength: is the preferred drive_strength to be used for the SDIO - * pins. If 0 then a default value will be used. This is the target drive - * strength, the exact drive strength which will be used depends on the - * capabilities of the device. - * - * oob_irq_supported: does the board have support for OOB interrupts. SDIO - * in-band interrupts are relatively slow and for having less overhead on - * interrupt processing an out of band interrupt can be used. If the HW - * supports this then enable this by setting this field to true and configure - * the oob related fields. - * - * oob_irq_nr, oob_irq_flags: the OOB interrupt information. The values are - * used for registering the irq using request_irq function. - * - * broken_sg_support: flag for broken sg list support of SDIO host controller. - * Set this to true if the SDIO host controller has higher align requirement - * than 32 bytes for each scatterlist item. - * - * sd_head_align: alignment requirement for start of data buffer - * - * sd_sgentry_align: length alignment requirement for each sg entry - * - * power_on: This function is called by the brcmfmac when the module gets - * loaded. This can be particularly useful for low power devices. The platform - * spcific routine may for example decide to power up the complete device. - * If there is no use-case for this function then provide NULL. - * - * power_off: This function is called by the brcmfmac when the module gets - * unloaded. At this point the device can be powered down or otherwise be reset. - * So if an actual power_off is not supported but reset is then reset the device - * when this function gets called. This can be particularly useful for low power - * devices. If there is no use-case for this function (either power-down or - * reset) then provide NULL. - * - * reset: This function can get called if the device communication broke down. - * This functionality is particularly useful in case of SDIO type devices. It is - * possible to reset a dongle via sdio data interface, but it requires that - * this is fully functional. This function is chip/module specific and this - * function should return only after the complete reset has completed. - */ - -#define BRCMFMAC_SDIO_PDATA_NAME "brcmfmac_sdio" - -struct brcmfmac_sdio_platform_data { - unsigned int drive_strength; - bool oob_irq_supported; - unsigned int oob_irq_nr; - unsigned long oob_irq_flags; - bool broken_sg_support; - unsigned short sd_head_align; - unsigned short sd_sgentry_align; - void (*power_on)(void); - void (*power_off)(void); - void (*reset)(void); -}; - -#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ diff --git a/include/linux/platform_data/brcmfmac.h b/include/linux/platform_data/brcmfmac.h new file mode 100644 index 000000000000..1d30bf278231 --- /dev/null +++ b/include/linux/platform_data/brcmfmac.h @@ -0,0 +1,185 @@ +/* + * Copyright (c) 201 Broadcom Corporation + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _LINUX_BRCMFMAC_PLATFORM_H +#define _LINUX_BRCMFMAC_PLATFORM_H + + +#define BRCMFMAC_PDATA_NAME "brcmfmac" + +#define BRCMFMAC_COUNTRY_BUF_SZ 4 + + +/* + * Platform specific driver functions and data. Through the platform specific + * device data functions and data can be provided to help the brcmfmac driver to + * operate with the device in combination with the used platform. + */ + + +/** + * Note: the brcmfmac can be loaded as module or be statically built-in into + * the kernel. If built-in then do note that it uses module_init (and + * module_exit) routines which equal device_initcall. So if you intend to + * create a module with the platform specific data for the brcmfmac and have + * it built-in to the kernel then use a higher initcall then device_initcall + * (see init.h). If this is not done then brcmfmac will load without problems + * but will not pickup the platform data. + * + * When the driver does not "detect" platform driver data then it will continue + * without reporting anything and just assume there is no data needed. Which is + * probably true for most platforms. + */ + +/** + * enum brcmf_bus_type - Bus type identifier. Currently SDIO, USB and PCIE are + * supported. + */ +enum brcmf_bus_type { + BRCMF_BUSTYPE_SDIO, + BRCMF_BUSTYPE_USB, + BRCMF_BUSTYPE_PCIE +}; + + +/** + * struct brcmfmac_sdio_pd - SDIO Device specific platform data. + * + * @txglomsz: SDIO txglom size. Use 0 if default of driver is to be + * used. + * @drive_strength: is the preferred drive_strength to be used for the SDIO + * pins. If 0 then a default value will be used. This is + * the target drive strength, the exact drive strength + * which will be used depends on the capabilities of the + * device. + * @oob_irq_supported: does the board have support for OOB interrupts. SDIO + * in-band interrupts are relatively slow and for having + * less overhead on interrupt processing an out of band + * interrupt can be used. If the HW supports this then + * enable this by setting this field to true and configure + * the oob related fields. + * @oob_irq_nr, + * @oob_irq_flags: the OOB interrupt information. The values are used for + * registering the irq using request_irq function. + * @broken_sg_support: flag for broken sg list support of SDIO host controller. + * Set this to true if the SDIO host controller has higher + * align requirement than 32 bytes for each scatterlist + * item. + * @sd_head_align: alignment requirement for start of data buffer. + * @sd_sgentry_align: length alignment requirement for each sg entry. + * @reset: This function can get called if the device communication + * broke down. This functionality is particularly useful in + * case of SDIO type devices. It is possible to reset a + * dongle via sdio data interface, but it requires that + * this is fully functional. This function is chip/module + * specific and this function should return only after the + * complete reset has completed. 
+ */ +struct brcmfmac_sdio_pd { + int txglomsz; + unsigned int drive_strength; + bool oob_irq_supported; + unsigned int oob_irq_nr; + unsigned long oob_irq_flags; + bool broken_sg_support; + unsigned short sd_head_align; + unsigned short sd_sgentry_align; + void (*reset)(void); +}; + +/** + * struct brcmfmac_pd_cc_entry - Struct for translating user space country code + * (iso3166) to firmware country code and + * revision. + * + * @iso3166: iso3166 alpha 2 country code string. + * @cc: firmware country code string. + * @rev: firmware country code revision. + */ +struct brcmfmac_pd_cc_entry { + char iso3166[BRCMFMAC_COUNTRY_BUF_SZ]; + char cc[BRCMFMAC_COUNTRY_BUF_SZ]; + s32 rev; +}; + +/** + * struct brcmfmac_pd_cc - Struct for translating country codes as set by user + * space to a country code and rev which can be used by + * firmware. + * + * @table_size: number of entries in table (> 0) + * @table: array of 1 or more elements with translation information. + */ +struct brcmfmac_pd_cc { + int table_size; + struct brcmfmac_pd_cc_entry table[0]; +}; + +/** + * struct brcmfmac_pd_device - Device specific platform data. (id/rev/bus_type) + * is the unique identifier of the device. + * + * @id: ID of the device for which this data is. In case of SDIO + * or PCIE this is the chipid as identified by chip.c In + * case of USB this is the chipid as identified by the + * device query. + * @rev: chip revision, see id. + * @bus_type: The type of bus. Some chipid/rev exist for different bus + * types. Each bus type has its own set of settings. + * @feature_disable: Bitmask of features to disable (override), See feature.c + * in brcmfmac for details. + * @country_codes: If available, pointer to struct for translating country + * codes. + * @bus: Bus specific (union) device settings. Currently only + * SDIO. + */ +struct brcmfmac_pd_device { + unsigned int id; + unsigned int rev; + enum brcmf_bus_type bus_type; + unsigned int feature_disable; + struct brcmfmac_pd_cc *country_codes; + union { + struct brcmfmac_sdio_pd sdio; + } bus; +}; + +/** + * struct brcmfmac_platform_data - BRCMFMAC specific platform data. + * + * @power_on: This function is called by the brcmfmac driver when the module + * gets loaded. This can be particularly useful for low power + * devices. The platform spcific routine may for example decide to + * power up the complete device. If there is no use-case for this + * function then provide NULL. + * @power_off: This function is called by the brcmfmac when the module gets + * unloaded. At this point the devices can be powered down or + * otherwise be reset. So if an actual power_off is not supported + * but reset is supported by the devices then reset the devices + * when this function gets called. This can be particularly useful + * for low power devices. If there is no use-case for this + * function then provide NULL. + */ +struct brcmfmac_platform_data { + void (*power_on)(void); + void (*power_off)(void); + char *fw_alternative_path; + int device_count; + struct brcmfmac_pd_device devices[0]; +}; + + +#endif /* _LINUX_BRCMFMAC_PLATFORM_H */ -- cgit v1.2.3 From af5b5e62f72e08e6e7e3d3cd78ee7e4cec96f7c8 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:08 +0100 Subject: brcmfmac: merge platform data and module paramaters Merge module parameters and platform data in one struct. This is the last step to move to the new platform data per device. Now parameters of platform data will be merged with module parameters per device. 
Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 20 +++---- .../net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 4 +- .../wireless/broadcom/brcm80211/brcmfmac/common.c | 68 +++++++++++++--------- .../wireless/broadcom/brcm80211/brcmfmac/common.h | 39 +++++-------- .../wireless/broadcom/brcm80211/brcmfmac/core.c | 11 +--- .../net/wireless/broadcom/brcm80211/brcmfmac/of.c | 14 ++--- .../net/wireless/broadcom/brcm80211/brcmfmac/of.h | 5 +- .../wireless/broadcom/brcm80211/brcmfmac/pcie.c | 24 ++++++-- .../wireless/broadcom/brcm80211/brcmfmac/sdio.c | 46 ++++++++------- .../wireless/broadcom/brcm80211/brcmfmac/sdio.h | 2 +- .../net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 16 ++++- 11 files changed, 139 insertions(+), 110 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index bb4aece9ad2c..da0cdd313880 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -109,8 +109,8 @@ int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev) u32 addr, gpiocontrol; unsigned long flags; - pdata = sdiodev->pdata; - if ((pdata) && (pdata->oob_irq_supported)) { + pdata = &sdiodev->settings->bus.sdio; + if (pdata->oob_irq_supported) { brcmf_dbg(SDIO, "Enter, register OOB IRQ %d\n", pdata->oob_irq_nr); ret = request_irq(pdata->oob_irq_nr, brcmf_sdiod_oob_irqhandler, @@ -177,8 +177,8 @@ int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev) brcmf_dbg(SDIO, "Entering\n"); - pdata = sdiodev->pdata; - if ((pdata) && (pdata->oob_irq_supported)) { + pdata = &sdiodev->settings->bus.sdio; + if (pdata->oob_irq_supported) { sdio_claim_host(sdiodev->func[1]); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, 0, NULL); brcmf_sdiod_regwb(sdiodev, SDIO_CCCR_IENx, 0, NULL); @@ -522,7 +522,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, target_list = pktlist; /* for host with broken sg support, prepare a page aligned list */ __skb_queue_head_init(&local_list); - if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) { + if (!write && sdiodev->settings->bus.sdio.broken_sg_support) { req_sz = 0; skb_queue_walk(pktlist, pkt_next) req_sz += pkt_next->len; @@ -629,7 +629,7 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, uint fn, } } - if (sdiodev->pdata && sdiodev->pdata->broken_sg_support && !write) { + if (!write && sdiodev->settings->bus.sdio.broken_sg_support) { local_pkt_next = local_list.next; orig_offset = 0; skb_queue_walk(pktlist, pkt_next) { @@ -900,7 +900,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) return; nents = max_t(uint, BRCMF_DEFAULT_RXGLOM_SIZE, - sdiodev->bus_if->drvr->settings->sdiod_txglomsz); + sdiodev->settings->bus.sdio.txglomsz); nents += (nents >> 4) + 1; WARN_ON(nents > sdiodev->max_segment_count); @@ -912,7 +912,7 @@ void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) sdiodev->sg_support = false; } - sdiodev->txglomsz = sdiodev->bus_if->drvr->settings->sdiod_txglomsz; + sdiodev->txglomsz = sdiodev->settings->bus.sdio.txglomsz; } #ifdef CONFIG_PM_SLEEP @@ -1246,8 +1246,8 @@ static int brcmf_ops_sdio_suspend(struct device *dev) sdio_flags = MMC_PM_KEEP_POWER; if (sdiodev->wowl_enabled) { - if 
(sdiodev->pdata->oob_irq_supported) - enable_irq_wake(sdiodev->pdata->oob_irq_nr); + if (sdiodev->settings->bus.sdio.oob_irq_supported) + enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr); else sdio_flags |= MMC_PM_WAKE_SDIO_IRQ; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h index 36093f93bfbe..8e02a478e889 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h @@ -43,6 +43,8 @@ enum brcmf_bus_protocol_type { BRCMF_PROTO_MSGBUF }; +struct brcmf_mp_device; + struct brcmf_bus_dcmd { char *name; char *param; @@ -217,7 +219,7 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt, void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp); /* Indication from bus module regarding presence/insertion of dongle. */ -int brcmf_attach(struct device *dev); +int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings); /* Indication from bus module regarding removal/absence of dongle */ void brcmf_detach(struct device *dev); /* Indication from bus module that dongle should be reset */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 4bd3225cdea6..9e909e3c2f0c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -243,14 +243,35 @@ static void brcmf_mp_attach(void) } } -struct brcmfmac_sdio_pd *brcmf_get_module_param(struct device *dev, - enum brcmf_bus_type bus_type, - u32 chip, u32 chiprev) +struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, + enum brcmf_bus_type bus_type, + u32 chip, u32 chiprev) { - struct brcmfmac_sdio_pd *pdata; + struct brcmf_mp_device *settings; struct brcmfmac_pd_device *device_pd; + bool found; int i; + brcmf_dbg(INFO, "Enter, bus=%d, chip=%d, rev=%d\n", bus_type, chip, + chiprev); + settings = kzalloc(sizeof(*settings), GFP_ATOMIC); + if (!settings) + return NULL; + + /* start by using the module paramaters */ + settings->p2p_enable = !!brcmf_p2p_enable; + settings->feature_disable = brcmf_feature_disable; + settings->fcmode = brcmf_fcmode; + settings->roamoff = !!brcmf_roamoff; +#ifdef DEBUG + settings->ignore_probe_fail = !!brcmf_ignore_probe_fail; +#endif + + if (bus_type == BRCMF_BUSTYPE_SDIO) + settings->bus.sdio.txglomsz = brcmf_sdiod_txglomsz; + + /* See if there is any device specific platform data configured */ + found = false; if (brcmfmac_pdata) { for (i = 0; i < brcmfmac_pdata->device_count; i++) { device_pd = &brcmfmac_pdata->devices[i]; @@ -259,38 +280,29 @@ struct brcmfmac_sdio_pd *brcmf_get_module_param(struct device *dev, ((device_pd->rev == chiprev) || (device_pd->rev == -1))) { brcmf_dbg(INFO, "Platform data for device found\n"); + settings->country_codes = + device_pd->country_codes; if (device_pd->bus_type == BRCMF_BUSTYPE_SDIO) - return &device_pd->bus.sdio; + memcpy(&settings->bus.sdio, + &device_pd->bus.sdio, + sizeof(settings->bus.sdio)); + found = true; break; } } } - pdata = NULL; - brcmf_of_probe(dev, &pdata); - - return pdata; -} - -int brcmf_mp_device_attach(struct brcmf_pub *drvr) -{ - drvr->settings = kzalloc(sizeof(*drvr->settings), GFP_ATOMIC); - if (!drvr->settings) - return -ENOMEM; - - drvr->settings->sdiod_txglomsz = brcmf_sdiod_txglomsz; - drvr->settings->p2p_enable = !!brcmf_p2p_enable; - drvr->settings->feature_disable = brcmf_feature_disable; - 
drvr->settings->fcmode = brcmf_fcmode; - drvr->settings->roamoff = !!brcmf_roamoff; -#ifdef DEBUG - drvr->settings->ignore_probe_fail = !!brcmf_ignore_probe_fail; -#endif - return 0; + if ((bus_type == BRCMF_BUSTYPE_SDIO) && (!found)) { + /* No platform data for this device. In case of SDIO try OF + * (Open Firwmare) Device Tree. + */ + brcmf_of_probe(dev, &settings->bus.sdio); + } + return settings; } -void brcmf_mp_device_detach(struct brcmf_pub *drvr) +void brcmf_release_module_param(struct brcmf_mp_device *module_param) { - kfree(drvr->settings); + kfree(module_param); } static int __init brcmf_common_pd_probe(struct platform_device *pdev) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index a64e40e8bfda..bd095abca393 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -45,41 +45,30 @@ extern struct brcmf_mp_global_t brcmf_mp_global; /** * struct brcmf_mp_device - Device module paramaters. * - * @sdiod_txglomsz: SDIO txglom size. - * @joinboost_5g_rssi: 5g rssi booost for preferred join selection. * @p2p_enable: Legacy P2P0 enable (old wpa_supplicant). * @feature_disable: Feature_disable bitmask. * @fcmode: FWS flow control. * @roamoff: Firmware roaming off? + * @ignore_probe_fail: Ignore probe failure. * @country_codes: If available, pointer to struct for translating country codes + * @bus: Bus specific platform data. Only SDIO at the mmoment. */ struct brcmf_mp_device { - int sdiod_txglomsz; - int joinboost_5g_rssi; - bool p2p_enable; - int feature_disable; - int fcmode; - bool roamoff; - bool ignore_probe_fail; + bool p2p_enable; + unsigned int feature_disable; + int fcmode; + bool roamoff; + bool ignore_probe_fail; struct brcmfmac_pd_cc *country_codes; + union { + struct brcmfmac_sdio_pd sdio; + } bus; }; -struct brcmfmac_sdio_pd *brcmf_get_module_param(struct device *dev, - enum brcmf_bus_type bus_type, - u32 chip, u32 chiprev); -int brcmf_mp_device_attach(struct brcmf_pub *drvr); -void brcmf_mp_device_detach(struct brcmf_pub *drvr); -#ifdef DEBUG -static inline bool brcmf_ignoring_probe_fail(struct brcmf_pub *drvr) -{ - return drvr->settings->ignore_probe_fail; -} -#else -static inline bool brcmf_ignoring_probe_fail(struct brcmf_pub *drvr) -{ - return false; -} -#endif +struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, + enum brcmf_bus_type bus_type, + u32 chip, u32 chiprev); +void brcmf_release_module_param(struct brcmf_mp_device *module_param); /* Sets dongle media info (drv_version, mac address). 
*/ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 7b24e2b71a43..ff825cd7739e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1104,7 +1104,7 @@ static int brcmf_inet6addr_changed(struct notifier_block *nb, } #endif -int brcmf_attach(struct device *dev) +int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) { struct brcmf_pub *drvr = NULL; int ret = 0; @@ -1126,10 +1126,7 @@ int brcmf_attach(struct device *dev) drvr->hdrlen = 0; drvr->bus_if = dev_get_drvdata(dev); drvr->bus_if->drvr = drvr; - - /* Initialize device specific settings */ - if (brcmf_mp_device_attach(drvr)) - goto fail; + drvr->settings = settings; /* attach debug facilities */ brcmf_debug_attach(drvr); @@ -1274,7 +1271,7 @@ fail: brcmf_net_detach(p2p_ifp->ndev); drvr->iflist[0] = NULL; drvr->iflist[1] = NULL; - if (brcmf_ignoring_probe_fail(drvr)) + if (drvr->settings->ignore_probe_fail) ret = 0; return ret; @@ -1350,8 +1347,6 @@ void brcmf_detach(struct device *dev) brcmf_proto_detach(drvr); - brcmf_mp_device_detach(drvr); - brcmf_debug_detach(drvr); bus_if->drvr = NULL; kfree(drvr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index ece0b65dd039..425c41dc0a59 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -23,7 +23,7 @@ #include "common.h" #include "of.h" -void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio) +void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio) { struct device_node *np = dev->of_node; int irq; @@ -33,12 +33,8 @@ void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio) if (!np || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) return; - *sdio = devm_kzalloc(dev, sizeof(*sdio), GFP_KERNEL); - if (!(*sdio)) - return; - if (of_property_read_u32(np, "brcm,drive-strength", &val) == 0) - (*sdio)->drive_strength = val; + sdio->drive_strength = val; /* make sure there are interrupts defined in the node */ if (!of_find_property(np, "interrupts", NULL)) @@ -51,7 +47,7 @@ void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio) } irqf = irqd_get_trigger_type(irq_get_irq_data(irq)); - (*sdio)->oob_irq_supported = true; - (*sdio)->oob_irq_nr = irq; - (*sdio)->oob_irq_flags = irqf; + sdio->oob_irq_supported = true; + sdio->oob_irq_nr = irq; + sdio->oob_irq_flags = irqf; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h index 1ba951f9b542..a9d94c15d0f5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.h @@ -14,10 +14,9 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #ifdef CONFIG_OF -void -brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio); +void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio); #else -static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd **sdio) +static void brcmf_of_probe(struct device *dev, struct brcmfmac_sdio_pd *sdio) { } #endif /* CONFIG_OF */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 1866b66b47f2..52fef5e1d615 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -37,6 +37,8 @@ #include "pcie.h" #include "firmware.h" #include "chip.h" +#include "core.h" +#include "common.h" enum brcmf_pcie_state { @@ -266,6 +268,7 @@ struct brcmf_pciedev_info { u16 (*read_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset); void (*write_ptr)(struct brcmf_pciedev_info *devinfo, u32 mem_offset, u16 value); + struct brcmf_mp_device *settings; }; struct brcmf_pcie_ringbuf { @@ -1525,16 +1528,16 @@ static void brcmf_pcie_release_resource(struct brcmf_pciedev_info *devinfo) } -static int brcmf_pcie_attach_bus(struct device *dev) +static int brcmf_pcie_attach_bus(struct brcmf_pciedev_info *devinfo) { int ret; /* Attach to the common driver interface */ - ret = brcmf_attach(dev); + ret = brcmf_attach(&devinfo->pdev->dev, devinfo->settings); if (ret) { brcmf_err("brcmf_attach failed\n"); } else { - ret = brcmf_bus_start(dev); + ret = brcmf_bus_start(&devinfo->pdev->dev); if (ret) brcmf_err("dongle is not responding\n"); } @@ -1672,7 +1675,7 @@ static void brcmf_pcie_setup(struct device *dev, const struct firmware *fw, init_waitqueue_head(&devinfo->mbdata_resp_wait); brcmf_pcie_intr_enable(devinfo); - if (brcmf_pcie_attach_bus(bus->dev) == 0) + if (brcmf_pcie_attach_bus(devinfo) == 0) return; brcmf_pcie_bus_console_read(devinfo); @@ -1716,6 +1719,15 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail; } + devinfo->settings = brcmf_get_module_param(&devinfo->pdev->dev, + BRCMF_BUSTYPE_PCIE, + devinfo->ci->chip, + devinfo->ci->chiprev); + if (!devinfo->settings) { + ret = -ENOMEM; + goto fail; + } + bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) { ret = -ENOMEM; @@ -1760,6 +1772,8 @@ fail: brcmf_pcie_release_resource(devinfo); if (devinfo->ci) brcmf_chip_detach(devinfo->ci); + if (devinfo->settings) + brcmf_release_module_param(devinfo->settings); kfree(pcie_bus_dev); kfree(devinfo); return ret; @@ -1799,6 +1813,8 @@ brcmf_pcie_remove(struct pci_dev *pdev) if (devinfo->ci) brcmf_chip_detach(devinfo->ci); + if (devinfo->settings) + brcmf_release_module_param(devinfo->settings); kfree(devinfo); dev_set_drvdata(&pdev->dev, NULL); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 6e367041f691..43fd3f402eba 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -2442,15 +2442,17 @@ static void brcmf_sdio_bus_stop(struct device *dev) static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus) { + struct brcmf_sdio_dev *sdiodev; unsigned long flags; - if (bus->sdiodev->oob_irq_requested) { - spin_lock_irqsave(&bus->sdiodev->irq_en_lock, flags); - if (!bus->sdiodev->irq_en && !atomic_read(&bus->ipend)) { - enable_irq(bus->sdiodev->pdata->oob_irq_nr); - bus->sdiodev->irq_en = true; + sdiodev = bus->sdiodev; + if (sdiodev->oob_irq_requested) { 
+ spin_lock_irqsave(&sdiodev->irq_en_lock, flags); + if (!sdiodev->irq_en && !atomic_read(&bus->ipend)) { + enable_irq(sdiodev->settings->bus.sdio.oob_irq_nr); + sdiodev->irq_en = true; } - spin_unlock_irqrestore(&bus->sdiodev->irq_en_lock, flags); + spin_unlock_irqrestore(&sdiodev->irq_en_lock, flags); } } @@ -3394,9 +3396,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) sizeof(u32)); } else { /* otherwise, set txglomalign */ - value = 4; - if (sdiodev->pdata) - value = sdiodev->pdata->sd_sgentry_align; + value = sdiodev->settings->bus.sdio.sd_sgentry_align; /* SDIO ADMA requires at least 32 bit alignment */ value = max_t(u32, value, 4); err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, @@ -3811,21 +3811,25 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) bus->ci = NULL; goto fail; } - sdiodev->pdata = brcmf_get_module_param(sdiodev->dev, + sdiodev->settings = brcmf_get_module_param(sdiodev->dev, BRCMF_BUSTYPE_SDIO, bus->ci->chip, bus->ci->chiprev); + if (!sdiodev->settings) { + brcmf_err("Failed to get device parameters\n"); + goto fail; + } /* platform specific configuration: * alignments must be at least 4 bytes for ADMA */ bus->head_align = ALIGNMENT; bus->sgentry_align = ALIGNMENT; - if (sdiodev->pdata) { - if (sdiodev->pdata->sd_head_align > ALIGNMENT) - bus->head_align = sdiodev->pdata->sd_head_align; - if (sdiodev->pdata->sd_sgentry_align > ALIGNMENT) - bus->sgentry_align = sdiodev->pdata->sd_sgentry_align; - } + if (sdiodev->settings->bus.sdio.sd_head_align > ALIGNMENT) + bus->head_align = sdiodev->settings->bus.sdio.sd_head_align; + if (sdiodev->settings->bus.sdio.sd_sgentry_align > ALIGNMENT) + bus->sgentry_align = + sdiodev->settings->bus.sdio.sd_sgentry_align; + /* allocate scatter-gather table. sg support * will be disabled upon allocation failure. 
*/ @@ -3837,7 +3841,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) */ if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) && ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) || - (sdiodev->pdata && sdiodev->pdata->oob_irq_supported))) + (sdiodev->settings->bus.sdio.oob_irq_supported))) sdiodev->bus_if->wowl_supported = true; #endif @@ -3846,8 +3850,8 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus) goto fail; } - if ((sdiodev->pdata) && (sdiodev->pdata->drive_strength)) - drivestrength = sdiodev->pdata->drive_strength; + if (sdiodev->settings->bus.sdio.drive_strength) + drivestrength = sdiodev->settings->bus.sdio.drive_strength; else drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH; brcmf_sdio_drivestrengthinit(sdiodev, bus->ci, drivestrength); @@ -4124,7 +4128,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN; /* Attach to the common layer, reserve hdr space */ - ret = brcmf_attach(bus->sdiodev->dev); + ret = brcmf_attach(bus->sdiodev->dev, bus->sdiodev->settings); if (ret != 0) { brcmf_err("brcmf_attach failed\n"); goto fail; @@ -4228,6 +4232,8 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) } brcmf_chip_detach(bus->ci); } + if (bus->sdiodev->settings) + brcmf_release_module_param(bus->sdiodev->settings); kfree(bus->rxbuf); kfree(bus->hdrbuf); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 50df9cb21af2..dcf0ce8cd2c1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -184,7 +184,7 @@ struct brcmf_sdio_dev { struct brcmf_sdio *bus; struct device *dev; struct brcmf_bus *bus_if; - struct brcmfmac_sdio_pd *pdata; + struct brcmf_mp_device *settings; bool oob_irq_requested; bool irq_en; /* irq enable flags */ spinlock_t irq_en_lock; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index c72b7b352a77..869eb82db8b1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -27,6 +27,8 @@ #include "debug.h" #include "firmware.h" #include "usb.h" +#include "core.h" +#include "common.h" #define IOCTL_RESP_TIMEOUT msecs_to_jiffies(2000) @@ -171,6 +173,7 @@ struct brcmf_usbdev_info { struct urb *bulk_urb; /* used for FW download */ bool wowl_enabled; + struct brcmf_mp_device *settings; }; static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, @@ -1027,6 +1030,9 @@ static void brcmf_usb_detach(struct brcmf_usbdev_info *devinfo) kfree(devinfo->tx_reqs); kfree(devinfo->rx_reqs); + + if (devinfo->settings) + brcmf_release_module_param(devinfo->settings); } @@ -1136,7 +1142,7 @@ static int brcmf_usb_bus_setup(struct brcmf_usbdev_info *devinfo) int ret; /* Attach to the common driver interface */ - ret = brcmf_attach(devinfo->dev); + ret = brcmf_attach(devinfo->dev, devinfo->settings); if (ret) { brcmf_err("brcmf_attach failed\n"); return ret; @@ -1223,6 +1229,14 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo) bus->wowl_supported = true; #endif + devinfo->settings = brcmf_get_module_param(bus->dev, BRCMF_BUSTYPE_USB, + bus_pub->devid, + bus_pub->chiprev); + if (!devinfo->settings) { + ret = -ENOMEM; + goto fail; + } + if (!brcmf_usb_dlneeded(devinfo)) { ret = brcmf_usb_bus_setup(devinfo); if (ret) -- cgit v1.2.3 From 
219e0f747ad6d2b504199dbbaee6b9ad269dde07 Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:09 +0100 Subject: brcmfmac: integrate add_keyext in add_key brcmf_add_keyext is called when a key is configured for a specific mac address. This function is very similar to the calling function brcmf_add_key. Integrate this function and also use existing del_key function in case key is to be cleared. Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 154 ++++++--------------- 1 file changed, 39 insertions(+), 115 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5e3acaca7231..5203d8f38474 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -2073,84 +2073,34 @@ done: } static s32 -brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev, - u8 key_idx, const u8 *mac_addr, struct key_params *params) +brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, + u8 key_idx, bool pairwise, const u8 *mac_addr) { struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_wsec_key key; s32 err = 0; - u8 keybuf[8]; + + brcmf_dbg(TRACE, "Enter\n"); + if (!check_vif_up(ifp->vif)) + return -EIO; + + if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) { + /* we ignore this key index in this case */ + return -EINVAL; + } memset(&key, 0, sizeof(key)); - key.index = (u32) key_idx; - /* Instead of bcast for ea address for default wep keys, - driver needs it to be Null */ - if (!is_multicast_ether_addr(mac_addr)) - memcpy((char *)&key.ea, (void *)mac_addr, ETH_ALEN); - key.len = (u32) params->key_len; - /* check for key index change */ - if (key.len == 0) { - /* key delete */ - err = send_key_to_dongle(ifp, &key); - if (err) - brcmf_err("key delete error (%d)\n", err); - } else { - if (key.len > sizeof(key.data)) { - brcmf_err("Invalid key length (%d)\n", key.len); - return -EINVAL; - } - brcmf_dbg(CONN, "Setting the key index %d\n", key.index); - memcpy(key.data, params->key, key.len); + key.index = (u32)key_idx; + key.flags = BRCMF_PRIMARY_KEY; + key.algo = CRYPTO_ALGO_OFF; - if (!brcmf_is_apmode(ifp->vif) && - (params->cipher == WLAN_CIPHER_SUITE_TKIP)) { - brcmf_dbg(CONN, "Swapping RX/TX MIC key\n"); - memcpy(keybuf, &key.data[24], sizeof(keybuf)); - memcpy(&key.data[24], &key.data[16], sizeof(keybuf)); - memcpy(&key.data[16], keybuf, sizeof(keybuf)); - } + brcmf_dbg(CONN, "key index (%d)\n", key_idx); - /* if IW_ENCODE_EXT_RX_SEQ_VALID set */ - if (params->seq && params->seq_len == 6) { - /* rx iv */ - u8 *ivptr; - ivptr = (u8 *) params->seq; - key.rxiv.hi = (ivptr[5] << 24) | (ivptr[4] << 16) | - (ivptr[3] << 8) | ivptr[2]; - key.rxiv.lo = (ivptr[1] << 8) | ivptr[0]; - key.iv_initialized = true; - } + /* Set the new key/index */ + err = send_key_to_dongle(ifp, &key); - switch (params->cipher) { - case WLAN_CIPHER_SUITE_WEP40: - key.algo = CRYPTO_ALGO_WEP1; - brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP40\n"); - break; - case WLAN_CIPHER_SUITE_WEP104: - key.algo = CRYPTO_ALGO_WEP128; - brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n"); - break; - case WLAN_CIPHER_SUITE_TKIP: - key.algo = CRYPTO_ALGO_TKIP; - brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n"); - break; - case WLAN_CIPHER_SUITE_AES_CMAC: - key.algo = 
CRYPTO_ALGO_AES_CCM; - brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n"); - break; - case WLAN_CIPHER_SUITE_CCMP: - key.algo = CRYPTO_ALGO_AES_CCM; - brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_CCMP\n"); - break; - default: - brcmf_err("Invalid cipher (0x%x)\n", params->cipher); - return -EINVAL; - } - err = send_key_to_dongle(ifp, &key); - if (err) - brcmf_err("wsec_key error (%d)\n", err); - } + brcmf_dbg(TRACE, "Exit\n"); return err; } @@ -2163,8 +2113,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, struct brcmf_wsec_key *key; s32 val; s32 wsec; - s32 err = 0; + s32 err; u8 keybuf[8]; + bool ext_key; brcmf_dbg(TRACE, "Enter\n"); brcmf_dbg(CONN, "key index (%d)\n", key_idx); @@ -2177,27 +2128,32 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, return -EINVAL; } - if (mac_addr && - (params->cipher != WLAN_CIPHER_SUITE_WEP40) && - (params->cipher != WLAN_CIPHER_SUITE_WEP104)) { - brcmf_dbg(TRACE, "Exit"); - return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params); - } - - key = &ifp->vif->profile.key[key_idx]; - memset(key, 0, sizeof(*key)); + if (params->key_len == 0) + return brcmf_cfg80211_del_key(wiphy, ndev, key_idx, pairwise, + mac_addr); if (params->key_len > sizeof(key->data)) { brcmf_err("Too long key length (%u)\n", params->key_len); - err = -EINVAL; - goto done; + return -EINVAL; } + + ext_key = false; + if (mac_addr && (params->cipher != WLAN_CIPHER_SUITE_WEP40) && + (params->cipher != WLAN_CIPHER_SUITE_WEP104)) { + brcmf_dbg(TRACE, "Ext key, mac %pM", mac_addr); + ext_key = true; + } + + key = &ifp->vif->profile.key[key_idx]; + memset(key, 0, sizeof(*key)); + if ((ext_key) && (!is_multicast_ether_addr(mac_addr))) + memcpy((char *)&key->ea, (void *)mac_addr, ETH_ALEN); key->len = params->key_len; key->index = key_idx; - memcpy(key->data, params->key, key->len); + if (!ext_key) + key->flags = BRCMF_PRIMARY_KEY; - key->flags = BRCMF_PRIMARY_KEY; switch (params->cipher) { case WLAN_CIPHER_SUITE_WEP40: key->algo = CRYPTO_ALGO_WEP1; @@ -2237,7 +2193,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, } err = send_key_to_dongle(ifp, key); - if (err) + if (ext_key || err) goto done; err = brcmf_fil_bsscfg_int_get(ifp, "wsec", &wsec); @@ -2257,38 +2213,6 @@ done: return err; } -static s32 -brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, - u8 key_idx, bool pairwise, const u8 *mac_addr) -{ - struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_wsec_key key; - s32 err = 0; - - brcmf_dbg(TRACE, "Enter\n"); - if (!check_vif_up(ifp->vif)) - return -EIO; - - if (key_idx >= BRCMF_MAX_DEFAULT_KEYS) { - /* we ignore this key index in this case */ - return -EINVAL; - } - - memset(&key, 0, sizeof(key)); - - key.index = (u32) key_idx; - key.flags = BRCMF_PRIMARY_KEY; - key.algo = CRYPTO_ALGO_OFF; - - brcmf_dbg(CONN, "key index (%d)\n", key_idx); - - /* Set the new key/index */ - err = send_key_to_dongle(ifp, &key); - - brcmf_dbg(TRACE, "Exit\n"); - return err; -} - static s32 brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, -- cgit v1.2.3 From 240d61a9ddeb8a77f11369f67f6ef060354b909d Mon Sep 17 00:00:00 2001 From: Hante Meuleman Date: Wed, 17 Feb 2016 11:27:10 +0100 Subject: brcmfmac: add 802.11w management frame protection support Add full support for both AP and STA for management frame protection. 
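The firmware's "mfp" setting is derived from the RSN Capabilities field of the RSN IE, a little-endian u16 in which bit 6 (MFPR) means protection is required and bit 7 (MFPC) means it is merely supported. A minimal stand-alone sketch of that mapping, reusing the constant names the patch below introduces (the helper function itself is illustrative and not part of the driver):

#include <stdint.h>

#define RSN_CAP_MFPR_MASK	(1u << 6)	/* management frame protection required */
#define RSN_CAP_MFPC_MASK	(1u << 7)	/* management frame protection capable */

#define BRCMF_MFP_NONE		0
#define BRCMF_MFP_CAPABLE	1
#define BRCMF_MFP_REQUIRED	2

/* Illustrative helper: rsn_cap is the 16-bit RSN Capabilities field, read
 * little-endian from the RSN IE after the AKM suite list has been skipped.
 */
static unsigned int mfp_mode_from_rsn_cap(uint16_t rsn_cap)
{
	if (rsn_cap & RSN_CAP_MFPR_MASK)
		return BRCMF_MFP_REQUIRED;
	if (rsn_cap & RSN_CAP_MFPC_MASK)
		return BRCMF_MFP_CAPABLE;
	return BRCMF_MFP_NONE;
}

The same precedence (required wins over capable) is what the cfg80211 changes below apply before setting the "mfp" iovar.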
Reviewed-by: Arend Van Spriel Reviewed-by: Franky (Zhenhui) Lin Reviewed-by: Pieter-Paul Giesberts Signed-off-by: Hante Meuleman Signed-off-by: Arend van Spriel Signed-off-by: Kalle Valo --- .../broadcom/brcm80211/brcmfmac/cfg80211.c | 272 +++++++++++++++------ .../broadcom/brcm80211/brcmfmac/cfg80211.h | 3 +- .../wireless/broadcom/brcm80211/brcmfmac/feature.c | 1 + .../wireless/broadcom/brcm80211/brcmfmac/feature.h | 4 +- .../broadcom/brcm80211/brcmfmac/fwil_types.h | 4 + .../broadcom/brcm80211/include/brcmu_wifi.h | 2 + 6 files changed, 211 insertions(+), 75 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5203d8f38474..d5c2a27573b4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -72,8 +72,13 @@ #define RSN_AKM_NONE 0 /* None (IBSS) */ #define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */ #define RSN_AKM_PSK 2 /* Pre-shared Key */ +#define RSN_AKM_SHA256_1X 5 /* SHA256, 802.1X */ +#define RSN_AKM_SHA256_PSK 6 /* SHA256, Pre-shared Key */ #define RSN_CAP_LEN 2 /* Length of RSN capabilities */ -#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C +#define RSN_CAP_PTK_REPLAY_CNTR_MASK (BIT(2) | BIT(3)) +#define RSN_CAP_MFPR_MASK BIT(6) +#define RSN_CAP_MFPC_MASK BIT(7) +#define RSN_PMKID_COUNT_LEN 2 #define VNDR_IE_CMD_LEN 4 /* length of the set command * string :"add", "del" (+ NUL) @@ -211,12 +216,19 @@ static const struct ieee80211_regdomain brcmf_regdom = { REG_RULE(5470-10, 5850+10, 80, 6, 20, 0), } }; -static const u32 __wl_cipher_suites[] = { +/* Note: brcmf_cipher_suites is an array of int defining which cipher suites + * are supported. A pointer to this array and the number of entries is passed + * on to upper layers. AES_CMAC defines whether or not the driver supports MFP. + * So the cipher suite AES_CMAC has to be the last one in the array, and when + * device does not support MFP then the number of suites will be decreased by 1 + */ +static const u32 brcmf_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, - WLAN_CIPHER_SUITE_AES_CMAC, + /* Keep as last entry: */ + WLAN_CIPHER_SUITE_AES_CMAC }; /* Vendor specific ie. 
id = 221, oui and type defines exact ie */ @@ -1533,7 +1545,7 @@ static s32 brcmf_set_auth_type(struct net_device *ndev, static s32 brcmf_set_wsec_mode(struct net_device *ndev, - struct cfg80211_connect_params *sme, bool mfp) + struct cfg80211_connect_params *sme) { struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); struct brcmf_cfg80211_security *sec; @@ -1592,10 +1604,7 @@ brcmf_set_wsec_mode(struct net_device *ndev, sme->privacy) pval = AES_ENABLED; - if (mfp) - wsec = pval | gval | MFP_CAPABLE; - else - wsec = pval | gval; + wsec = pval | gval; err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wsec", wsec); if (err) { brcmf_err("error (%d)\n", err); @@ -1612,56 +1621,100 @@ brcmf_set_wsec_mode(struct net_device *ndev, static s32 brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme) { - struct brcmf_cfg80211_profile *profile = ndev_to_prof(ndev); - struct brcmf_cfg80211_security *sec; - s32 val = 0; - s32 err = 0; + struct brcmf_if *ifp = netdev_priv(ndev); + s32 val; + s32 err; + const struct brcmf_tlv *rsn_ie; + const u8 *ie; + u32 ie_len; + u32 offset; + u16 rsn_cap; + u32 mfp; + u16 count; - if (sme->crypto.n_akm_suites) { - err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), - "wpa_auth", &val); - if (err) { - brcmf_err("could not get wpa_auth (%d)\n", err); - return err; + if (!sme->crypto.n_akm_suites) + return 0; + + err = brcmf_fil_bsscfg_int_get(netdev_priv(ndev), "wpa_auth", &val); + if (err) { + brcmf_err("could not get wpa_auth (%d)\n", err); + return err; + } + if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA_AUTH_PSK; + break; + default: + brcmf_err("invalid cipher group (%d)\n", + sme->crypto.cipher_group); + return -EINVAL; } - if (val & (WPA_AUTH_PSK | WPA_AUTH_UNSPECIFIED)) { - switch (sme->crypto.akm_suites[0]) { - case WLAN_AKM_SUITE_8021X: - val = WPA_AUTH_UNSPECIFIED; - break; - case WLAN_AKM_SUITE_PSK: - val = WPA_AUTH_PSK; - break; - default: - brcmf_err("invalid cipher group (%d)\n", - sme->crypto.cipher_group); - return -EINVAL; - } - } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { - switch (sme->crypto.akm_suites[0]) { - case WLAN_AKM_SUITE_8021X: - val = WPA2_AUTH_UNSPECIFIED; - break; - case WLAN_AKM_SUITE_PSK: - val = WPA2_AUTH_PSK; - break; - default: - brcmf_err("invalid cipher group (%d)\n", - sme->crypto.cipher_group); - return -EINVAL; - } + } else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) { + switch (sme->crypto.akm_suites[0]) { + case WLAN_AKM_SUITE_8021X: + val = WPA2_AUTH_UNSPECIFIED; + break; + case WLAN_AKM_SUITE_8021X_SHA256: + val = WPA2_AUTH_1X_SHA256; + break; + case WLAN_AKM_SUITE_PSK_SHA256: + val = WPA2_AUTH_PSK_SHA256; + break; + case WLAN_AKM_SUITE_PSK: + val = WPA2_AUTH_PSK; + break; + default: + brcmf_err("invalid cipher group (%d)\n", + sme->crypto.cipher_group); + return -EINVAL; } + } - brcmf_dbg(CONN, "setting wpa_auth to %d\n", val); - err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), - "wpa_auth", val); - if (err) { - brcmf_err("could not set wpa_auth (%d)\n", err); - return err; - } + if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) + goto skip_mfp_config; + /* The MFP mode (1 or 2) needs to be determined, parse IEs. 
The + * IE will not be verified, just a quick search for MFP config + */ + rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie, sme->ie_len, + WLAN_EID_RSN); + if (!rsn_ie) + goto skip_mfp_config; + ie = (const u8 *)rsn_ie; + ie_len = rsn_ie->len + TLV_HDR_LEN; + /* Skip unicast suite */ + offset = TLV_HDR_LEN + WPA_IE_VERSION_LEN + WPA_IE_MIN_OUI_LEN; + if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len) + goto skip_mfp_config; + /* Skip multicast suite */ + count = ie[offset] + (ie[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN); + if (offset + WPA_IE_SUITE_COUNT_LEN >= ie_len) + goto skip_mfp_config; + /* Skip auth key management suite(s) */ + count = ie[offset] + (ie[offset + 1] << 8); + offset += WPA_IE_SUITE_COUNT_LEN + (count * WPA_IE_MIN_OUI_LEN); + if (offset + WPA_IE_SUITE_COUNT_LEN > ie_len) + goto skip_mfp_config; + /* Ready to read capabilities */ + mfp = BRCMF_MFP_NONE; + rsn_cap = ie[offset] + (ie[offset + 1] << 8); + if (rsn_cap & RSN_CAP_MFPR_MASK) + mfp = BRCMF_MFP_REQUIRED; + else if (rsn_cap & RSN_CAP_MFPC_MASK) + mfp = BRCMF_MFP_CAPABLE; + brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "mfp", mfp); + +skip_mfp_config: + brcmf_dbg(CONN, "setting wpa_auth to %d\n", val); + err = brcmf_fil_bsscfg_int_set(netdev_priv(ndev), "wpa_auth", val); + if (err) { + brcmf_err("could not set wpa_auth (%d)\n", err); + return err; } - sec = &profile->sec; - sec->wpa_auth = sme->crypto.akm_suites[0]; return err; } @@ -1827,7 +1880,7 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev, goto done; } - err = brcmf_set_wsec_mode(ndev, sme, sme->mfp == NL80211_MFP_REQUIRED); + err = brcmf_set_wsec_mode(ndev, sme); if (err) { brcmf_err("wl_set_set_cipher failed (%d)\n", err); goto done; @@ -2077,10 +2130,12 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, u8 key_idx, bool pairwise, const u8 *mac_addr) { struct brcmf_if *ifp = netdev_priv(ndev); - struct brcmf_wsec_key key; - s32 err = 0; + struct brcmf_wsec_key *key; + s32 err; brcmf_dbg(TRACE, "Enter\n"); + brcmf_dbg(CONN, "key index (%d)\n", key_idx); + if (!check_vif_up(ifp->vif)) return -EIO; @@ -2089,16 +2144,19 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, return -EINVAL; } - memset(&key, 0, sizeof(key)); + key = &ifp->vif->profile.key[key_idx]; - key.index = (u32)key_idx; - key.flags = BRCMF_PRIMARY_KEY; - key.algo = CRYPTO_ALGO_OFF; + if (key->algo == CRYPTO_ALGO_OFF) { + brcmf_dbg(CONN, "Ignore clearing of (never configured) key\n"); + return -EINVAL; + } - brcmf_dbg(CONN, "key index (%d)\n", key_idx); + memset(key, 0, sizeof(*key)); + key->index = (u32)key_idx; + key->flags = BRCMF_PRIMARY_KEY; - /* Set the new key/index */ - err = send_key_to_dongle(ifp, &key); + /* Clear the key/index */ + err = send_key_to_dongle(ifp, key); brcmf_dbg(TRACE, "Exit\n"); return err; @@ -2106,8 +2164,8 @@ brcmf_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev, static s32 brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev, - u8 key_idx, bool pairwise, const u8 *mac_addr, - struct key_params *params) + u8 key_idx, bool pairwise, const u8 *mac_addr, + struct key_params *params) { struct brcmf_if *ifp = netdev_priv(ndev); struct brcmf_wsec_key *key; @@ -2214,9 +2272,10 @@ done: } static s32 -brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev, - u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie, - void (*callback) (void *cookie, struct key_params * params)) +brcmf_cfg80211_get_key(struct wiphy *wiphy, struct 
net_device *ndev, u8 key_idx, + bool pairwise, const u8 *mac_addr, void *cookie, + void (*callback)(void *cookie, + struct key_params *params)) { struct key_params params; struct brcmf_if *ifp = netdev_priv(ndev); @@ -2268,8 +2327,15 @@ done: static s32 brcmf_cfg80211_config_default_mgmt_key(struct wiphy *wiphy, - struct net_device *ndev, u8 key_idx) + struct net_device *ndev, u8 key_idx) { + struct brcmf_if *ifp = netdev_priv(ndev); + + brcmf_dbg(TRACE, "Enter key_idx %d\n", key_idx); + + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) + return 0; + brcmf_dbg(INFO, "Not supported\n"); return -EOPNOTSUPP; @@ -3769,7 +3835,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, u32 auth = 0; /* d11 open authentication */ u16 count; s32 err = 0; - s32 len = 0; + s32 len; u32 i; u32 wsec; u32 pval = 0; @@ -3779,6 +3845,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, u8 *data; u16 rsn_cap; u32 wme_bss_disable; + u32 mfp; brcmf_dbg(TRACE, "Enter\n"); if (wpa_ie == NULL) @@ -3893,19 +3960,53 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, is_rsn_ie ? (wpa_auth |= WPA2_AUTH_PSK) : (wpa_auth |= WPA_AUTH_PSK); break; + case RSN_AKM_SHA256_PSK: + brcmf_dbg(TRACE, "RSN_AKM_MFP_PSK\n"); + wpa_auth |= WPA2_AUTH_PSK_SHA256; + break; + case RSN_AKM_SHA256_1X: + brcmf_dbg(TRACE, "RSN_AKM_MFP_1X\n"); + wpa_auth |= WPA2_AUTH_1X_SHA256; + break; default: brcmf_err("Ivalid key mgmt info\n"); } offset++; } + mfp = BRCMF_MFP_NONE; if (is_rsn_ie) { wme_bss_disable = 1; if ((offset + RSN_CAP_LEN) <= len) { rsn_cap = data[offset] + (data[offset + 1] << 8); if (rsn_cap & RSN_CAP_PTK_REPLAY_CNTR_MASK) wme_bss_disable = 0; + if (rsn_cap & RSN_CAP_MFPR_MASK) { + brcmf_dbg(TRACE, "MFP Required\n"); + mfp = BRCMF_MFP_REQUIRED; + /* Firmware only supports mfp required in + * combination with WPA2_AUTH_PSK_SHA256 or + * WPA2_AUTH_1X_SHA256. + */ + if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 | + WPA2_AUTH_1X_SHA256))) { + err = -EINVAL; + goto exit; + } + /* Firmware has requirement that WPA2_AUTH_PSK/ + * WPA2_AUTH_UNSPECIFIED be set, if SHA256 OUI + * is to be included in the rsn ie. + */ + if (wpa_auth & WPA2_AUTH_PSK_SHA256) + wpa_auth |= WPA2_AUTH_PSK; + else if (wpa_auth & WPA2_AUTH_1X_SHA256) + wpa_auth |= WPA2_AUTH_UNSPECIFIED; + } else if (rsn_cap & RSN_CAP_MFPC_MASK) { + brcmf_dbg(TRACE, "MFP Capable\n"); + mfp = BRCMF_MFP_CAPABLE; + } } + offset += RSN_CAP_LEN; /* set wme_bss_disable to sync RSN Capabilities */ err = brcmf_fil_bsscfg_int_set(ifp, "wme_bss_disable", wme_bss_disable); @@ -3913,6 +4014,21 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, brcmf_err("wme_bss_disable error %d\n", err); goto exit; } + + /* Skip PMKID cnt as it is know to be 0 for AP. 
*/ + offset += RSN_PMKID_COUNT_LEN; + + /* See if there is BIP wpa suite left for MFP */ + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP) && + ((offset + WPA_IE_MIN_OUI_LEN) <= len)) { + err = brcmf_fil_bsscfg_data_set(ifp, "bip", + &data[offset], + WPA_IE_MIN_OUI_LEN); + if (err < 0) { + brcmf_err("bip error %d\n", err); + goto exit; + } + } } /* FOR WPS , set SES_OW_ENABLED */ wsec = (pval | gval | SES_OW_ENABLED); @@ -3929,6 +4045,16 @@ brcmf_configure_wpaie(struct brcmf_if *ifp, brcmf_err("wsec error %d\n", err); goto exit; } + /* Configure MFP, this needs to go after wsec otherwise the wsec command + * will overwrite the values set by MFP + */ + if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) { + err = brcmf_fil_bsscfg_int_set(ifp, "mfp", mfp); + if (err < 0) { + brcmf_err("mfp error %d\n", err); + goto exit; + } + } /* set upper-layer auth */ err = brcmf_fil_bsscfg_int_set(ifp, "wpa_auth", wpa_auth); if (err < 0) { @@ -6149,8 +6275,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp) wiphy->n_addresses = i; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; - wiphy->cipher_suites = __wl_cipher_suites; - wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites); + wiphy->cipher_suites = brcmf_cipher_suites; + wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites); + if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP)) + wiphy->n_cipher_suites--; wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT | WIPHY_FLAG_OFFCHAN_TX | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index e4c1d4364684..95e35bcc16ce 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -72,7 +72,7 @@ #define BRCMF_VNDR_IE_P2PAF_SHIFT 12 -#define BRCMF_MAX_DEFAULT_KEYS 4 +#define BRCMF_MAX_DEFAULT_KEYS 6 /* beacon loss timeout defaults */ #define BRCMF_DEFAULT_BCN_TIMEOUT_ROAM_ON 2 @@ -107,7 +107,6 @@ struct brcmf_cfg80211_security { u32 auth_type; u32 cipher_pairwise; u32 cipher_group; - u32 wpa_auth; }; /** diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c index bfa19bb632b7..62985f2c0853 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c @@ -161,6 +161,7 @@ void brcmf_feat_attach(struct brcmf_pub *drvr) ifp->drvr->feat_flags &= ~BIT(BRCMF_FEAT_MBSS); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_RSDB, "rsdb_mode"); brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_TDLS, "tdls_enable"); + brcmf_feat_iovar_int_get(ifp, BRCMF_FEAT_MFP, "mfp"); pfn_mac.version = BRCMF_PFN_MACADDR_CFG_VER; err = brcmf_fil_iovar_data_get(ifp, "pfn_macaddr", &pfn_mac, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h index a3efa35a5c21..db4733a95e28 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h @@ -30,6 +30,7 @@ * WOWL_ND: WOWL net detect (PNO) * WOWL_GTK: (WOWL) GTK rekeying offload * WOWL_ARP_ND: ARP and Neighbor Discovery offload support during WOWL. + * MFP: 802.11w Management Frame Protection. 
*/ #define BRCMF_FEAT_LIST \ BRCMF_FEAT_DEF(MBSS) \ @@ -42,7 +43,8 @@ BRCMF_FEAT_DEF(SCAN_RANDOM_MAC) \ BRCMF_FEAT_DEF(WOWL_ND) \ BRCMF_FEAT_DEF(WOWL_GTK) \ - BRCMF_FEAT_DEF(WOWL_ARP_ND) + BRCMF_FEAT_DEF(WOWL_ARP_ND) \ + BRCMF_FEAT_DEF(MFP) /* * Quirks: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 6d41ae392a8f..a4118c0ef6ca 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -142,6 +142,10 @@ #define BRCMF_RSN_KEK_LENGTH 16 #define BRCMF_RSN_REPLAY_LEN 8 +#define BRCMF_MFP_NONE 0 +#define BRCMF_MFP_CAPABLE 1 +#define BRCMF_MFP_REQUIRED 2 + /* join preference types for join_pref iovar */ enum brcmf_join_pref_types { BRCMF_JOIN_PREF_RSSI = 1, diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h index 3f68dd5ecd11..7b9a77981df1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcmu_wifi.h @@ -236,6 +236,8 @@ static inline bool ac_bitmap_tst(u8 bitmap, int prec) #define WPA2_AUTH_RESERVED3 0x0200 #define WPA2_AUTH_RESERVED4 0x0400 #define WPA2_AUTH_RESERVED5 0x0800 +#define WPA2_AUTH_1X_SHA256 0x1000 /* 1X with SHA256 key derivation */ +#define WPA2_AUTH_PSK_SHA256 0x8000 /* PSK with SHA256 key derivation */ #define DOT11_DEFAULT_RTS_LEN 2347 #define DOT11_DEFAULT_FRAG_LEN 2346 -- cgit v1.2.3 From 8dd37c7cd442411f52b90e8b556d38324450de46 Mon Sep 17 00:00:00 2001 From: Amitoj Kaur Chawla Date: Wed, 17 Feb 2016 18:03:37 +0530 Subject: mwifiex: Use to_delayed_work() Introduce the use of to_delayed_work() helper function instead of open coding it with container_of() A simplified version of the Coccinelle semantic patch used to make this change is: // @@ expression a; symbol work; @@ - container_of(a, struct delayed_work, work) + to_delayed_work(a) // Signed-off-by: Amitoj Kaur Chawla Reviewed-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/11h.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/11h.c b/drivers/net/wireless/marvell/mwifiex/11h.c index 71a1b580796f..81c60d0a1bda 100644 --- a/drivers/net/wireless/marvell/mwifiex/11h.c +++ b/drivers/net/wireless/marvell/mwifiex/11h.c @@ -123,8 +123,7 @@ void mwifiex_11h_process_join(struct mwifiex_private *priv, u8 **buffer, void mwifiex_dfs_cac_work_queue(struct work_struct *work) { struct cfg80211_chan_def chandef; - struct delayed_work *delayed_work = - container_of(work, struct delayed_work, work); + struct delayed_work *delayed_work = to_delayed_work(work); struct mwifiex_private *priv = container_of(delayed_work, struct mwifiex_private, dfs_cac_work); @@ -289,8 +288,7 @@ int mwifiex_11h_handle_radar_detected(struct mwifiex_private *priv, void mwifiex_dfs_chan_sw_work_queue(struct work_struct *work) { struct mwifiex_uap_bss_param *bss_cfg; - struct delayed_work *delayed_work = - container_of(work, struct delayed_work, work); + struct delayed_work *delayed_work = to_delayed_work(work); struct mwifiex_private *priv = container_of(delayed_work, struct mwifiex_private, dfs_chan_sw_work); -- cgit v1.2.3 From 064b32f822afd9a578a4ec6bf754cd61cbe0c324 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sun, 21 Feb 2016 01:30:29 +0300 Subject: at76c50x-usb: avoid double usb_put_dev() 
after downloading internal firmware in at76_probe() There is no need in usb_put_dev() if at76_load_internal_fw() succeed. Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Signed-off-by: Kalle Valo --- drivers/net/wireless/atmel/at76c50x-usb.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index dab25136214a..1efb1d66e0b7 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -2481,9 +2481,7 @@ static int at76_probe(struct usb_interface *interface, dev_err(&interface->dev, "error %d downloading internal firmware\n", ret); - goto exit; } - usb_put_dev(udev); goto exit; } -- cgit v1.2.3 From ce2542435aadc9953656e8afddaab0caf0079a3a Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 22 Feb 2016 11:35:46 +0000 Subject: rtlwifi: pass struct rtl_stats by reference as it is more efficient passing rtl_stats by value is inefficient; the structure is over 300 bytes in size and generally just one field (packet_report_type) is being accessed, so the pass by value is a relatively large overhead. This change just affects just the rx_command_packet calls. Signed-off-by: Colin Ian King Acked-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/pci.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 6 +++--- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h | 2 +- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h | 2 +- drivers/net/wireless/realtek/rtlwifi/wifi.h | 2 +- 12 files changed, 16 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index 140d2541562d..283d608b9973 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -855,7 +855,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) } /* handle command packet here */ if (rtlpriv->cfg->ops->rx_command_packet && - rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { + rtlpriv->cfg->ops->rx_command_packet(hw, &stats, skb)) { dev_kfree_skb_any(skb); goto new_trx_end; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 791efbe6b18c..11701064b0e1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -851,7 +851,7 @@ void rtl88ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) } u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb) { return 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h index eab5ae0eb46c..5a24d194ac76 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h @@ -790,7 +790,7 @@ void rtl88ee_tx_fill_cmddesc(struct 
ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); u32 rtl88ee_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c index d39ee67f6113..24eff8ea4c2e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c @@ -1105,13 +1105,13 @@ void rtl92ee_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) } u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb) { u32 result = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); - switch (status.packet_report_type) { + switch (status->packet_report_type) { case NORMAL_RX: result = 0; break; @@ -1121,7 +1121,7 @@ u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, break; default: RT_TRACE(rtlpriv, COMP_RECV, DBG_TRACE, - "Unknown packet type %d\n", status.packet_report_type); + "Unknown packet type %d\n", status->packet_report_type); break; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h index 8f78ac9e6040..a4c38345233e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.h @@ -857,6 +857,6 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); u32 rtl92ee_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c index 2f7c144d7980..7b4a9b63583b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c @@ -710,7 +710,7 @@ void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) } u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb) { return 0; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h index 017da7e194d8..32970bf18856 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.h @@ -716,6 +716,6 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); u32 rtl8723e_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c index 338ec9a9d09b..60345975f9fd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c @@ -758,13 +758,13 @@ void rtl8723be_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) } u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb) { u32 result = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); - switch (status.packet_report_type) { + switch (status->packet_report_type) { case NORMAL_RX: result = 0; break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h 
b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h index 45949ac4854c..40c36607b8b9 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.h @@ -620,6 +620,6 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); u32 rtl8723be_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index 174743aef943..41efaa148d13 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -998,13 +998,13 @@ void rtl8821ae_tx_polling(struct ieee80211_hw *hw, u8 hw_queue) } u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb) { u32 result = 0; struct rtl_priv *rtlpriv = rtl_priv(hw); - switch (status.packet_report_type) { + switch (status->packet_report_type) { case NORMAL_RX: result = 0; break; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h index 31409042d8dd..ad565bebf1d5 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.h @@ -615,6 +615,6 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc, bool firstseg, bool lastseg, struct sk_buff *skb); u32 rtl8821ae_rx_command_packet(struct ieee80211_hw *hw, - struct rtl_stats status, + const struct rtl_stats *status, struct sk_buff *skb); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 9e3cdd732eca..b07b364b93ab 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2189,7 +2189,7 @@ struct rtl_hal_ops { bool (*get_btc_status) (void); bool (*is_fw_header)(struct rtlwifi_firmware_header *hdr); u32 (*rx_command_packet)(struct ieee80211_hw *hw, - struct rtl_stats status, struct sk_buff *skb); + const struct rtl_stats *status, struct sk_buff *skb); void (*add_wowlan_pattern)(struct ieee80211_hw *hw, struct rtl_wow_pattern *rtl_pattern, u8 index); -- cgit v1.2.3 From de651ce3d750c19a7b8b7cef22407ecc496619f4 Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 23 Feb 2016 05:16:16 -0800 Subject: mwifiex: add delay when tdls confirm frame is queued It is observed that driver may send the data packet to tdls peer before tdls peer receives tdls setup confirm frame. Similar race condition exists during tdls teardown procedure also. This patch adds 10 milliseconds delay to resolve the race. 
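Queueing a frame for transmission is asynchronous, so returning from the queueing call does not mean the setup-confirm or teardown frame is already on the air; the delay simply gives the peer time to receive it before data frames for that link follow. A condensed sketch of the idea (the action codes are the standard ieee80211.h values, while queue_tx_frame() is a placeholder for the driver's TX path):

	/* Illustrative only: queueing is asynchronous, so pause briefly after
	 * frames whose reception must precede any data traffic to the peer.
	 */
	queue_tx_frame(priv, skb);

	if (action_code == WLAN_TDLS_SETUP_CONFIRM ||
	    action_code == WLAN_TDLS_TEARDOWN)
		msleep_interruptible(10);	/* may sleep: process context only */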
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/tdls.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c index 9275f9c3f869..150649602e98 100644 --- a/drivers/net/wireless/marvell/mwifiex/tdls.c +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c @@ -680,6 +680,13 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer, __net_timestamp(skb); mwifiex_queue_tx_pkt(priv, skb); + /* Delay 10ms to make sure tdls setup confirm/teardown frame + * is received by peer + */ + if (action_code == WLAN_TDLS_SETUP_CONFIRM || + action_code == WLAN_TDLS_TEARDOWN) + msleep_interruptible(10); + return 0; } -- cgit v1.2.3 From a6139b6271f9f95377fe3486aed6120c9142779b Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Tue, 23 Feb 2016 05:16:17 -0800 Subject: mwifiex: fix corner case association failure This patch corrects the error case in association path by returning -1. Earlier "media_connected" used to remain on in this error case causing failure for further association attempts. Signed-off-by: Amitkumar Karwar Fixes: b887664d882ee4 ('mwifiex: channel switch handling for station') Signed-off-by: Cathy Luo Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c index 5cbee58f8781..d5c56eb9e985 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c @@ -314,6 +314,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, mwifiex_dbg(adapter, ERROR, "Attempt to reconnect on csa closed chan(%d)\n", bss_desc->channel); + ret = -1; goto done; } -- cgit v1.2.3 From 8b7ef8b66eb9127b6d2ee46348b3f743bb40facb Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 23 Feb 2016 05:16:18 -0800 Subject: mwifiex: add sdio multiport aggregation debug information This patch sdio multi port aggregation statistics which can be used for debugging. This debug data is collected in /sys/kernel/debug/mwifiex/mlan0/debug. 
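The new last_mp_* fields behave as a small ring log: each aggregated write advances last_sdio_mp_index modulo MWIFIEX_DBG_SDIO_MP_NUM and records the write bitmap, the aggregated port value, the buffer length and the current write port in that slot, so the debugfs dump always shows the most recent ten aggregated transfers. A stand-alone sketch of that pattern, with illustrative names rather than the driver's:

#include <stdint.h>

#define MP_LOG_DEPTH 10	/* mirrors MWIFIEX_DBG_SDIO_MP_NUM */

struct mp_wr_log {
	uint32_t wr_bitmap[MP_LOG_DEPTH];
	uint32_t wr_ports[MP_LOG_DEPTH];
	uint32_t wr_len[MP_LOG_DEPTH];
	uint8_t  last;	/* index of the most recent entry */
};

/* Record one aggregated write; once the ring is full the oldest entry is
 * overwritten, so the log always holds the last MP_LOG_DEPTH writes.
 */
static void mp_log_write(struct mp_wr_log *log, uint32_t bitmap,
			 uint32_t mport, uint32_t len)
{
	log->last = (log->last + 1) % MP_LOG_DEPTH;
	log->wr_bitmap[log->last] = bitmap;
	log->wr_ports[log->last]  = mport;
	log->wr_len[log->last]    = len;
}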
Signed-off-by: Xinming Hu Signed-off-by: Cathy Luo Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/ioctl.h | 6 ++++++ drivers/net/wireless/marvell/mwifiex/main.h | 5 +++++ drivers/net/wireless/marvell/mwifiex/sdio.c | 10 ++++++++++ drivers/net/wireless/marvell/mwifiex/util.c | 20 ++++++++++++++++++++ 4 files changed, 41 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/ioctl.h b/drivers/net/wireless/marvell/mwifiex/ioctl.h index 14cfa37deb00..a5a48c183d37 100644 --- a/drivers/net/wireless/marvell/mwifiex/ioctl.h +++ b/drivers/net/wireless/marvell/mwifiex/ioctl.h @@ -184,6 +184,7 @@ struct mwifiex_ds_tx_ba_stream_tbl { }; #define DBG_CMD_NUM 5 +#define MWIFIEX_DBG_SDIO_MP_NUM 10 struct tdls_peer_info { u8 peer_addr[ETH_ALEN]; @@ -235,6 +236,11 @@ struct mwifiex_debug_info { u8 cmd_sent; u8 cmd_resp_received; u8 event_received; + u32 last_mp_wr_bitmap[MWIFIEX_DBG_SDIO_MP_NUM]; + u32 last_mp_wr_ports[MWIFIEX_DBG_SDIO_MP_NUM]; + u32 last_mp_wr_len[MWIFIEX_DBG_SDIO_MP_NUM]; + u32 last_mp_curr_wr_port[MWIFIEX_DBG_SDIO_MP_NUM]; + u8 last_sdio_mp_index; }; #define MWIFIEX_KEY_INDEX_UNICAST 0x40000000 diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index aea7aee46cf7..aafc4ab4e5ae 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -223,6 +223,11 @@ struct mwifiex_dbg { u16 last_cmd_resp_index; u16 last_event[DBG_CMD_NUM]; u16 last_event_index; + u32 last_mp_wr_bitmap[MWIFIEX_DBG_SDIO_MP_NUM]; + u32 last_mp_wr_ports[MWIFIEX_DBG_SDIO_MP_NUM]; + u32 last_mp_wr_len[MWIFIEX_DBG_SDIO_MP_NUM]; + u32 last_mp_curr_wr_port[MWIFIEX_DBG_SDIO_MP_NUM]; + u8 last_sdio_mp_index; }; enum MWIFIEX_HARDWARE_STATUS { diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index abf15dbdfe08..838b260f86fa 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1684,6 +1684,7 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, s32 f_precopy_cur_buf = 0; s32 f_postcopy_cur_buf = 0; u32 mport; + int index; if (!card->mpa_tx.enabled || (card->has_control_mask && (port == CTRL_PORT)) || @@ -1788,6 +1789,15 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, card->mpa_tx.buf_len, mport); + /* Save the last multi port tx aggreagation info to debug log */ + index = adapter->dbg.last_sdio_mp_index; + index = (index + 1) % MWIFIEX_DBG_SDIO_MP_NUM; + adapter->dbg.last_sdio_mp_index = index; + adapter->dbg.last_mp_wr_ports[index] = mport; + adapter->dbg.last_mp_wr_bitmap[index] = card->mp_wr_bitmap; + adapter->dbg.last_mp_wr_len[index] = card->mpa_tx.buf_len; + adapter->dbg.last_mp_curr_wr_port[index] = card->curr_wr_port; + MP_TX_AGGR_BUF_RESET(card); } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 0cec8a64473e..6681be0511c7 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -78,6 +78,16 @@ static struct mwifiex_debug_data items[] = { item_addr(last_event), DBG_CMD_NUM}, {"last_event_index", item_size(last_event_index), item_addr(last_event_index), 1}, + {"last_mp_wr_bitmap", item_size(last_mp_wr_bitmap), + item_addr(last_mp_wr_bitmap), MWIFIEX_DBG_SDIO_MP_NUM}, + {"last_mp_wr_ports", 
item_size(last_mp_wr_ports), + item_addr(last_mp_wr_ports), MWIFIEX_DBG_SDIO_MP_NUM}, + {"last_mp_wr_len", item_size(last_mp_wr_len), + item_addr(last_mp_wr_len), MWIFIEX_DBG_SDIO_MP_NUM}, + {"last_mp_curr_wr_port", item_size(last_mp_curr_wr_port), + item_addr(last_mp_curr_wr_port), MWIFIEX_DBG_SDIO_MP_NUM}, + {"last_sdio_mp_index", item_size(last_sdio_mp_index), + item_addr(last_sdio_mp_index), 1}, {"num_cmd_h2c_fail", item_size(num_cmd_host_to_card_failure), item_addr(num_cmd_host_to_card_failure), 1}, {"num_cmd_sleep_cfm_fail", @@ -233,6 +243,16 @@ int mwifiex_get_debug_info(struct mwifiex_private *priv, memcpy(info->last_event, adapter->dbg.last_event, sizeof(adapter->dbg.last_event)); info->last_event_index = adapter->dbg.last_event_index; + memcpy(info->last_mp_wr_bitmap, adapter->dbg.last_mp_wr_bitmap, + sizeof(adapter->dbg.last_mp_wr_bitmap)); + memcpy(info->last_mp_wr_ports, adapter->dbg.last_mp_wr_ports, + sizeof(adapter->dbg.last_mp_wr_ports)); + memcpy(info->last_mp_curr_wr_port, + adapter->dbg.last_mp_curr_wr_port, + sizeof(adapter->dbg.last_mp_curr_wr_port)); + memcpy(info->last_mp_wr_len, adapter->dbg.last_mp_wr_len, + sizeof(adapter->dbg.last_mp_wr_len)); + info->last_sdio_mp_index = adapter->dbg.last_sdio_mp_index; info->data_sent = adapter->data_sent; info->cmd_sent = adapter->cmd_sent; info->cmd_resp_received = adapter->cmd_resp_received; -- cgit v1.2.3 From 0cb52aac4d19510d13410ee4232c0248fabe750e Mon Sep 17 00:00:00 2001 From: Xinming Hu Date: Tue, 23 Feb 2016 05:16:19 -0800 Subject: mwifiex: do not set multiport flag for tx/rx single packet multiport address flag(0x1000) should not be set during sdio cmd53, if we have only one packet to read/write. Signed-off-by: Cathy Luo Signed-off-by: Xinming Hu Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/sdio.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 838b260f86fa..b2c839ae2c3c 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -1355,6 +1355,9 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter, card->mpa_rx.start_port; } + if (card->mpa_rx.pkt_cnt == 1) + mport = adapter->ioport + port; + if (mwifiex_read_data_sync(adapter, card->mpa_rx.buf, card->mpa_rx.buf_len, mport, 1)) goto error; @@ -1786,6 +1789,9 @@ static int mwifiex_host_to_card_mp_aggr(struct mwifiex_adapter *adapter, card->mpa_tx.start_port; } + if (card->mpa_tx.pkt_cnt == 1) + mport = adapter->ioport + port; + ret = mwifiex_write_data_to_card(adapter, card->mpa_tx.buf, card->mpa_tx.buf_len, mport); -- cgit v1.2.3 From f36f299068794ffc5026f25b6a1b3ed615ea832d Mon Sep 17 00:00:00 2001 From: Anthony Wong Date: Tue, 23 Feb 2016 23:09:22 +0800 Subject: rt2x00: add new rt2800usb device Buffalo WLI-UC-G450 Add USB ID 0411:01fd for Buffalo WLI-UC-G450 wireless adapter, RT chipset 3593 Signed-off-by: Anthony Wong Cc: stable@vger.kernel.org Acked-by: Stanislaw Gruszka Signed-off-by: Kalle Valo --- drivers/net/wireless/ralink/rt2x00/rt2800usb.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c index bf9afbf46c1b..4b0bb6b4f6f1 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c @@ -1026,6 +1026,7 @@ static struct usb_device_id 
rt2800usb_device_table[] = { { USB_DEVICE(0x0411, 0x01a2) }, { USB_DEVICE(0x0411, 0x01ee) }, { USB_DEVICE(0x0411, 0x01a8) }, + { USB_DEVICE(0x0411, 0x01fd) }, /* Corega */ { USB_DEVICE(0x07aa, 0x002f) }, { USB_DEVICE(0x07aa, 0x003c) }, -- cgit v1.2.3 From 32962d5b43b748c43b9b0b32d02674c1ce6997a9 Mon Sep 17 00:00:00 2001 From: Ujjal Roy Date: Thu, 25 Feb 2016 00:49:23 +0530 Subject: mwifiex: Fixed incorrect indentation issue This patch fixes the incorrect indentation of the case label. Signed-off-by: Ujjal Roy Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index f2dce81ba36e..29b7f6eed240 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -676,7 +676,7 @@ mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) } break; - case MWIFIEX_BSS_ROLE_STA: + case MWIFIEX_BSS_ROLE_STA: if (priv->media_connected) { mwifiex_dbg(adapter, ERROR, "cannot change wiphy params when connected"); -- cgit v1.2.3 From 3a968d766a63267b3d6b7a66b9d7023b7d93eaae Mon Sep 17 00:00:00 2001 From: Ujjal Roy Date: Thu, 25 Feb 2016 00:49:43 +0530 Subject: mwifiex: Removed extra spaces before commas This patch fixes spaces before commas issue in coding style. Signed-off-by: Ujjal Roy Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/pcie.c | 2 +- drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index efb19e2e1169..de364381fe7b 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c @@ -1401,7 +1401,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb) return -1; } - if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE)) + if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE)) return -1; buf_pa = MWIFIEX_SKB_DMA_ADDR(skb); diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c index e791166d90c4..16d95b22fe5c 100644 --- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c @@ -192,7 +192,7 @@ mwifiex_set_ht_params(struct mwifiex_private *priv, } priv->ap_11n_enabled = 1; } else { - memset(&bss_cfg->ht_cap , 0, sizeof(struct ieee80211_ht_cap)); + memset(&bss_cfg->ht_cap, 0, sizeof(struct ieee80211_ht_cap)); bss_cfg->ht_cap.cap_info = cpu_to_le16(MWIFIEX_DEF_HT_CAP); bss_cfg->ht_cap.ampdu_params_info = MWIFIEX_DEF_AMPDU; } -- cgit v1.2.3 From 354a1947a02528b19ee326e230cb44399cd31865 Mon Sep 17 00:00:00 2001 From: Ujjal Roy Date: Thu, 25 Feb 2016 00:49:56 +0530 Subject: mwifiex: Added missing spaces around brackets This patch fixes the missing spaces issue in coding style. 
Signed-off-by: Ujjal Roy Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/usb.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c index e43aff932360..05108618430d 100644 --- a/drivers/net/wireless/marvell/mwifiex/usb.c +++ b/drivers/net/wireless/marvell/mwifiex/usb.c @@ -244,9 +244,9 @@ setup_for_next: if (card->rx_cmd_ep == context->ep) { mwifiex_usb_submit_rx_urb(context, size); } else { - if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING){ + if (atomic_read(&adapter->rx_pending) <= HIGH_RX_PENDING) { mwifiex_usb_submit_rx_urb(context, size); - }else{ + } else { context->skb = NULL; } } -- cgit v1.2.3 From 2b02a36d12192f2a86388913143cd1e399eb971c Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 25 Feb 2016 23:24:27 +0000 Subject: mt7601u: do not free dma_buf when ivp allocation fails If the allocation of ivp fails the error handling attempts to free an uninitialized dma_buf; this data structure just contains garbage on the stack, so the freeing will cause issues when the urb, buf and dma fields are free'd. Fix this by not free'ing the dma_buf if the ivp allocation fails. Signed-off-by: Colin Ian King Reviewed-by: Julian Calaby Signed-off-by: Kalle Valo --- drivers/net/wireless/mediatek/mt7601u/mcu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index fbb1986eda3c..91c4b3427965 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -362,7 +362,9 @@ mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw) int i, ret; ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL); - if (!ivb || mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) { + if (!ivb) + return -ENOMEM; + if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) { ret = -ENOMEM; goto error; } -- cgit v1.2.3 From 2e62f9b2a41e4ade1a0bb3c1bbda4defe4c67243 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Fri, 12 Feb 2016 10:15:43 +0100 Subject: bcma: drop unneeded fields from bcma_pflash struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most of info stored in this struct wasn't really used anywhere as we put all that data in platform data & resource as well. 
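To make the point above concrete: everything the dropped bcma_pflash fields used to describe (bus width, window base, window size) is already expressed by the platform data and memory resource that bcma registers for the "physmap-flash" device. A minimal sketch of that, not taken from the patch (the demo_* names are invented; BCMA_SOC_FLASH2 and BCMA_SOC_FLASH2_SZ are the register-space constants the diff below uses):

#include <linux/platform_device.h>
#include <linux/mtd/physmap.h>

/* Illustrative only: the window and bus width live entirely in the
 * platform data + resource handed to the physmap-flash driver, so
 * struct bcma_pflash no longer needs its own copies of them.
 */
static struct physmap_flash_data demo_pflash_data = {
	.width	= 2,	/* 1 or 2, derived from BCMA_CC_FLASH_CFG_DS */
};

static struct resource demo_pflash_resource = {
	.name	= "bcma_pflash",
	.flags	= IORESOURCE_MEM,
	.start	= BCMA_SOC_FLASH2,
	.end	= BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ,
};

static struct platform_device demo_pflash_dev = {
	.name		= "physmap-flash",
	.dev		= { .platform_data = &demo_pflash_data, },
	.resource	= &demo_pflash_resource,
	.num_resources	= 1,
};

With the window, size and width expressed this way, the buswidth/window/window_size members carried no extra information, which is what the patch below removes.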
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_mips.c | 11 ++++------- include/linux/bcma/bcma_driver_chipcommon.h | 3 --- 2 files changed, 4 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index a40a203314db..20c134c016dc 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -288,18 +288,15 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) case BCMA_CC_FLASHT_PARA: bcma_debug(bus, "Found parallel flash\n"); pflash->present = true; - pflash->window = BCMA_SOC_FLASH2; - pflash->window_size = BCMA_SOC_FLASH2_SZ; if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & BCMA_CC_FLASH_CFG_DS) == 0) - pflash->buswidth = 1; + bcma_pflash_data.width = 1; else - pflash->buswidth = 2; + bcma_pflash_data.width = 2; - bcma_pflash_data.width = pflash->buswidth; - bcma_pflash_resource.start = pflash->window; - bcma_pflash_resource.end = pflash->window + pflash->window_size; + bcma_pflash_resource.start = BCMA_SOC_FLASH2; + bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ; break; default: diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 700d0c6f7480..16eaaad9dda5 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -579,9 +579,6 @@ struct bcma_chipcommon_pmu { #ifdef CONFIG_BCMA_DRIVER_MIPS struct bcma_pflash { bool present; - u8 buswidth; - u32 window; - u32 window_size; }; #ifdef CONFIG_BCMA_SFLASH -- cgit v1.2.3 From d6a3b51ada68c2bd3e184f4729ce626a1721cf74 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Fri, 12 Feb 2016 10:15:44 +0100 Subject: bcma: move parallel flash support to separated file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This follows the way of handling other flashes and cleans code a bit. As next task we will want to move flash code to ChipCommon driver as: 1) Flash controllers are accesible using ChipCommon registers 2) This code isn't MIPS specific This change prepares bcma for that. 
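The move leans on the standard compile-out idiom so that callers never need their own #ifdefs; a condensed view of the pieces this patch adds, collected here only for illustration (the full context is in the diff below):

/* Kconfig:   config BCMA_PFLASH  (bool, depends on BCMA_DRIVER_MIPS, default y)
 * Makefile:  bcma-$(CONFIG_BCMA_PFLASH) += driver_chipcommon_pflash.o
 *
 * bcma_private.h then exposes either the real initializer or a stub,
 * so bcma_core_mips_flash_detect() can simply call bcma_pflash_init(cc):
 */
#ifdef CONFIG_BCMA_PFLASH
extern struct platform_device bcma_pflash_dev;
int bcma_pflash_init(struct bcma_drv_cc *cc);
#else
static inline int bcma_pflash_init(struct bcma_drv_cc *cc)
{
	bcma_err(cc->core->bus, "Parallel flash not supported\n");
	return 0;
}
#endif /* CONFIG_BCMA_PFLASH */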
Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/Kconfig | 5 +++ drivers/bcma/Makefile | 1 + drivers/bcma/bcma_private.h | 18 +++++++++-- drivers/bcma/driver_chipcommon_pflash.c | 49 +++++++++++++++++++++++++++++ drivers/bcma/driver_mips.c | 35 +-------------------- drivers/bcma/main.c | 2 +- include/linux/bcma/bcma_driver_chipcommon.h | 8 +++-- 7 files changed, 78 insertions(+), 40 deletions(-) create mode 100644 drivers/bcma/driver_chipcommon_pflash.c (limited to 'drivers') diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig index 023d448ed3fa..efdc2ae8441a 100644 --- a/drivers/bcma/Kconfig +++ b/drivers/bcma/Kconfig @@ -70,6 +70,11 @@ config BCMA_DRIVER_MIPS If unsure, say N +config BCMA_PFLASH + bool + depends on BCMA_DRIVER_MIPS + default y + config BCMA_SFLASH bool depends on BCMA_DRIVER_MIPS diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile index f32af9b76bcd..087948a1d20d 100644 --- a/drivers/bcma/Makefile +++ b/drivers/bcma/Makefile @@ -1,6 +1,7 @@ bcma-y += main.o scan.o core.o sprom.o bcma-y += driver_chipcommon.o driver_chipcommon_pmu.o bcma-y += driver_chipcommon_b.o +bcma-$(CONFIG_BCMA_PFLASH) += driver_chipcommon_pflash.o bcma-$(CONFIG_BCMA_SFLASH) += driver_chipcommon_sflash.o bcma-$(CONFIG_BCMA_NFLASH) += driver_chipcommon_nflash.o bcma-$(CONFIG_BCMA_DRIVER_PCI) += driver_pci.o diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h index 7e4ddfb076d3..eda09090cb52 100644 --- a/drivers/bcma/bcma_private.h +++ b/drivers/bcma/bcma_private.h @@ -47,9 +47,6 @@ int bcma_sprom_get(struct bcma_bus *bus); void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc); void bcma_core_chipcommon_init(struct bcma_drv_cc *cc); void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable); -#ifdef CONFIG_BCMA_DRIVER_MIPS -extern struct platform_device bcma_pflash_dev; -#endif /* CONFIG_BCMA_DRIVER_MIPS */ /* driver_chipcommon_b.c */ int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb); @@ -61,6 +58,21 @@ void bcma_pmu_init(struct bcma_drv_cc *cc); u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc); u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc); +/************************************************** + * driver_chipcommon_sflash.c + **************************************************/ + +#ifdef CONFIG_BCMA_PFLASH +extern struct platform_device bcma_pflash_dev; +int bcma_pflash_init(struct bcma_drv_cc *cc); +#else +static inline int bcma_pflash_init(struct bcma_drv_cc *cc) +{ + bcma_err(cc->core->bus, "Parallel flash not supported\n"); + return 0; +} +#endif /* CONFIG_BCMA_PFLASH */ + #ifdef CONFIG_BCMA_SFLASH /* driver_chipcommon_sflash.c */ int bcma_sflash_init(struct bcma_drv_cc *cc); diff --git a/drivers/bcma/driver_chipcommon_pflash.c b/drivers/bcma/driver_chipcommon_pflash.c new file mode 100644 index 000000000000..3b497c9ee0d4 --- /dev/null +++ b/drivers/bcma/driver_chipcommon_pflash.c @@ -0,0 +1,49 @@ +/* + * Broadcom specific AMBA + * ChipCommon parallel flash + * + * Licensed under the GNU/GPL. See COPYING for details. 
+ */ + +#include "bcma_private.h" + +#include +#include +#include + +static const char * const part_probes[] = { "bcm47xxpart", NULL }; + +static struct physmap_flash_data bcma_pflash_data = { + .part_probe_types = part_probes, +}; + +static struct resource bcma_pflash_resource = { + .name = "bcma_pflash", + .flags = IORESOURCE_MEM, +}; + +struct platform_device bcma_pflash_dev = { + .name = "physmap-flash", + .dev = { + .platform_data = &bcma_pflash_data, + }, + .resource = &bcma_pflash_resource, + .num_resources = 1, +}; + +int bcma_pflash_init(struct bcma_drv_cc *cc) +{ + struct bcma_pflash *pflash = &cc->pflash; + + pflash->present = true; + + if (!(bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & BCMA_CC_FLASH_CFG_DS)) + bcma_pflash_data.width = 1; + else + bcma_pflash_data.width = 2; + + bcma_pflash_resource.start = BCMA_SOC_FLASH2; + bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ; + + return 0; +} diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 20c134c016dc..967b0e85e2cc 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -14,8 +14,6 @@ #include -#include -#include #include #include #include @@ -32,26 +30,6 @@ enum bcma_boot_dev { BCMA_BOOT_DEV_NAND, }; -static const char * const part_probes[] = { "bcm47xxpart", NULL }; - -static struct physmap_flash_data bcma_pflash_data = { - .part_probe_types = part_probes, -}; - -static struct resource bcma_pflash_resource = { - .name = "bcma_pflash", - .flags = IORESOURCE_MEM, -}; - -struct platform_device bcma_pflash_dev = { - .name = "physmap-flash", - .dev = { - .platform_data = &bcma_pflash_data, - }, - .resource = &bcma_pflash_resource, - .num_resources = 1, -}; - /* The 47162a0 hangs when reading MIPS DMP registers registers */ static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev) { @@ -276,7 +254,6 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) { struct bcma_bus *bus = mcore->core->bus; struct bcma_drv_cc *cc = &bus->drv_cc; - struct bcma_pflash *pflash = &cc->pflash; enum bcma_boot_dev boot_dev; switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { @@ -287,17 +264,7 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) break; case BCMA_CC_FLASHT_PARA: bcma_debug(bus, "Found parallel flash\n"); - pflash->present = true; - - if ((bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & - BCMA_CC_FLASH_CFG_DS) == 0) - bcma_pflash_data.width = 1; - else - bcma_pflash_data.width = 2; - - bcma_pflash_resource.start = BCMA_SOC_FLASH2; - bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ; - + bcma_pflash_init(cc); break; default: bcma_err(bus, "Flash type not supported\n"); diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index c466f752b067..786be8fed39e 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -350,7 +350,7 @@ static int bcma_register_devices(struct bcma_bus *bus) bcma_register_core(bus, core); } -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH if (bus->drv_cc.pflash.present) { err = platform_device_register(&bcma_pflash_dev); if (err) diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 16eaaad9dda5..846513c73606 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -576,10 +576,11 @@ struct bcma_chipcommon_pmu { u32 crystalfreq; /* The active crystal frequency (in kHz) */ }; -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH struct bcma_pflash { bool present; }; 
+#endif #ifdef CONFIG_BCMA_SFLASH struct mtd_info; @@ -603,6 +604,7 @@ struct bcma_nflash { }; #endif +#ifdef CONFIG_BCMA_DRIVER_MIPS struct bcma_serial_port { void *regs; unsigned long clockspeed; @@ -622,8 +624,9 @@ struct bcma_drv_cc { /* Fast Powerup Delay constant */ u16 fast_pwrup_delay; struct bcma_chipcommon_pmu pmu; -#ifdef CONFIG_BCMA_DRIVER_MIPS +#ifdef CONFIG_BCMA_PFLASH struct bcma_pflash pflash; +#endif #ifdef CONFIG_BCMA_SFLASH struct bcma_sflash sflash; #endif @@ -631,6 +634,7 @@ struct bcma_drv_cc { struct bcma_nflash nflash; #endif +#ifdef CONFIG_BCMA_DRIVER_MIPS int nr_serial_ports; struct bcma_serial_port serial_ports[4]; #endif /* CONFIG_BCMA_DRIVER_MIPS */ -- cgit v1.2.3 From 0ea6f0c582c2675285ad094df0137f4f0de47869 Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Fri, 12 Feb 2016 10:15:45 +0100 Subject: bcma: move flash detection code to ChipCommon core driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Flashes are attached and accessible through ChipCommon core, they aren't connected to MIPS one. Moreover some new ARM devices (e.g. BCM47189) may have serial flash accessibility using ChipCommon registers as well. To support them we can't keep this code in MIPS core driver. Signed-off-by: Rafał Miłecki Signed-off-by: Kalle Valo --- drivers/bcma/driver_chipcommon.c | 30 ++++++++++++++++++++++++++++++ drivers/bcma/driver_mips.c | 27 ++------------------------- 2 files changed, 32 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c index b0f44a2937b9..921ce1834673 100644 --- a/drivers/bcma/driver_chipcommon.c +++ b/drivers/bcma/driver_chipcommon.c @@ -115,6 +115,33 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc) return 0; } +static void bcma_core_chipcommon_flash_detect(struct bcma_drv_cc *cc) +{ + struct bcma_bus *bus = cc->core->bus; + + switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { + case BCMA_CC_FLASHT_STSER: + case BCMA_CC_FLASHT_ATSER: + bcma_debug(bus, "Found serial flash\n"); + bcma_sflash_init(cc); + break; + case BCMA_CC_FLASHT_PARA: + bcma_debug(bus, "Found parallel flash\n"); + bcma_pflash_init(cc); + break; + default: + bcma_err(bus, "Flash type not supported\n"); + } + + if (cc->core->id.rev == 38 || + bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) { + if (cc->capabilities & BCMA_CC_CAP_NFLASH) { + bcma_debug(bus, "Found NAND flash\n"); + bcma_nflash_init(cc); + } + } +} + void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc) { struct bcma_bus *bus = cc->core->bus; @@ -136,6 +163,9 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc) if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC) bcma_chipco_serial_init(cc); + if (bus->hosttype == BCMA_HOSTTYPE_SOC) + bcma_core_chipcommon_flash_detect(cc); + cc->early_setup_done = true; } diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c index 967b0e85e2cc..96f171328200 100644 --- a/drivers/bcma/driver_mips.c +++ b/drivers/bcma/driver_mips.c @@ -250,34 +250,11 @@ static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus) return BCMA_BOOT_DEV_SERIAL; } -static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore) +static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore) { struct bcma_bus *bus = mcore->core->bus; - struct bcma_drv_cc *cc = &bus->drv_cc; enum bcma_boot_dev boot_dev; - switch (cc->capabilities & BCMA_CC_CAP_FLASHT) { - case BCMA_CC_FLASHT_STSER: - case BCMA_CC_FLASHT_ATSER: - bcma_debug(bus, 
"Found serial flash\n"); - bcma_sflash_init(cc); - break; - case BCMA_CC_FLASHT_PARA: - bcma_debug(bus, "Found parallel flash\n"); - bcma_pflash_init(cc); - break; - default: - bcma_err(bus, "Flash type not supported\n"); - } - - if (cc->core->id.rev == 38 || - bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) { - if (cc->capabilities & BCMA_CC_CAP_NFLASH) { - bcma_debug(bus, "Found NAND flash\n"); - bcma_nflash_init(cc); - } - } - /* Determine flash type this SoC boots from */ boot_dev = bcma_boot_dev(bus); switch (boot_dev) { @@ -304,7 +281,7 @@ void bcma_core_mips_early_init(struct bcma_drv_mips *mcore) if (mcore->early_setup_done) return; - bcma_core_mips_flash_detect(mcore); + bcma_core_mips_nvram_init(mcore); mcore->early_setup_done = true; } -- cgit v1.2.3 From 088c86183012495b53ecc1c734909e5712a40b66 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 4 Mar 2016 12:35:05 -0500 Subject: qed/qede: Add infrastructure support for hardware GRO This patch adds mainly structures and APIs prototype changes in order to give support for qede slowpath/fastpath support for the same. Signed-off-by: Yuval Mintz Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 14 ++++- drivers/net/ethernet/qlogic/qed/qed_l2.c | 81 +++++++++++++++++++--------- drivers/net/ethernet/qlogic/qede/qede_main.c | 17 +++--- include/linux/qed/qed_eth_if.h | 12 +++-- 4 files changed, 88 insertions(+), 36 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 592e0e6d9b42..236db8a99ec3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -2919,7 +2919,19 @@ struct eth_vport_rx_mode { }; struct eth_vport_tpa_param { - u64 reserved[2]; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + u8 max_buff_num; + u8 reserved; }; struct eth_vport_tx_mode { diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 102ddc73b841..3f35c6ca9252 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -132,16 +132,29 @@ struct qed_sp_vport_update_params { struct qed_filter_accept_flags accept_flags; }; +enum qed_tpa_mode { + QED_TPA_MODE_NONE, + QED_TPA_MODE_UNUSED, + QED_TPA_MODE_GRO, + QED_TPA_MODE_MAX +}; + +struct qed_sp_vport_start_params { + enum qed_tpa_mode tpa_mode; + bool remove_inner_vlan; + bool drop_ttl0; + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; + u16 mtu; +}; + #define QED_MAX_SGES_NUM 16 #define CRC32_POLY 0x1edc6f41 static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, - u32 concrete_fid, - u16 opaque_fid, - u8 vport_id, - u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg) + struct qed_sp_vport_start_params *p_params) { struct vport_start_ramrod_data *p_ramrod = NULL; struct qed_spq_entry *p_ent = NULL; @@ -150,13 +163,13 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, u16 rx_mode = 0; u8 abs_vport_id = 0; - rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id); + rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc != 0) return rc; memset(&init_data, 0, sizeof(init_data)); init_data.cid = qed_spq_get_cid(p_hwfn); - init_data.opaque_fid = 
opaque_fid; + init_data.opaque_fid = p_params->opaque_fid; init_data.comp_mode = QED_SPQ_MODE_EBLOCK; rc = qed_sp_init_request(p_hwfn, &p_ent, @@ -168,9 +181,9 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, p_ramrod = &p_ent->ramrod.vport_start; p_ramrod->vport_id = abs_vport_id; - p_ramrod->mtu = cpu_to_le16(mtu); - p_ramrod->inner_vlan_removal_en = inner_vlan_removal_en_flg; - p_ramrod->drop_ttl0_en = drop_ttl0_flg; + p_ramrod->mtu = cpu_to_le16(p_params->mtu); + p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; + p_ramrod->drop_ttl0_en = p_params->drop_ttl0; SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); @@ -181,9 +194,26 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn, memset(&p_ramrod->tpa_param, 0, sizeof(struct eth_vport_tpa_param)); + p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; + + switch (p_params->tpa_mode) { + case QED_TPA_MODE_GRO: + p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + p_ramrod->tpa_param.tpa_max_size = (u16)-1; + p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2; + p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2; + p_ramrod->tpa_param.tpa_ipv4_en_flg = 1; + p_ramrod->tpa_param.tpa_ipv6_en_flg = 1; + p_ramrod->tpa_param.tpa_pkt_split_flg = 1; + p_ramrod->tpa_param.tpa_gro_consistent_flg = 1; + break; + default: + break; + } + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev, - concrete_fid); + p_params->concrete_fid); return qed_spq_post(p_hwfn, p_ent, NULL); } @@ -1592,24 +1622,25 @@ static void qed_register_eth_ops(struct qed_dev *cdev, } static int qed_start_vport(struct qed_dev *cdev, - u8 vport_id, - u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg) + struct qed_start_vport_params *params) { int rc, i; for_each_hwfn(cdev, i) { + struct qed_sp_vport_start_params start = { 0 }; struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - rc = qed_sp_vport_start(p_hwfn, - p_hwfn->hw_info.concrete_fid, - p_hwfn->hw_info.opaque_fid, - vport_id, - mtu, - drop_ttl0_flg, - inner_vlan_removal_en_flg); - + start.tpa_mode = params->gro_enable ? 
QED_TPA_MODE_GRO : + QED_TPA_MODE_NONE; + start.remove_inner_vlan = params->remove_inner_vlan; + start.drop_ttl0 = params->drop_ttl0; + start.opaque_fid = p_hwfn->hw_info.opaque_fid; + start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.vport_id = params->vport_id; + start.max_buffers_per_cqe = 16; + start.mtu = params->mtu; + + rc = qed_sp_vport_start(p_hwfn, &start); if (rc) { DP_ERR(cdev, "Failed to start VPORT\n"); return rc; @@ -1619,7 +1650,7 @@ static int qed_start_vport(struct qed_dev *cdev, DP_VERBOSE(cdev, (QED_MSG_SPQ | NETIF_MSG_IFUP), "Started V-PORT %d with MTU %d\n", - vport_id, mtu); + start.vport_id, start.mtu); } qed_reset_vport_stats(cdev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index ddd9e4aaa500..f75f334af7bd 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2466,11 +2466,12 @@ static int qede_stop_queues(struct qede_dev *edev) static int qede_start_queues(struct qede_dev *edev) { int rc, tc, i; - int vport_id = 0, drop_ttl0_flg = 1, vlan_removal_en = 1; + int vlan_removal_en = 1; struct qed_dev *cdev = edev->cdev; struct qed_update_vport_rss_params *rss_params = &edev->rss_params; struct qed_update_vport_params vport_update_params; struct qed_queue_start_common_params q_params; + struct qed_start_vport_params start = {0}; if (!edev->num_rss) { DP_ERR(edev, @@ -2478,10 +2479,12 @@ static int qede_start_queues(struct qede_dev *edev) return -EINVAL; } - rc = edev->ops->vport_start(cdev, vport_id, - edev->ndev->mtu, - drop_ttl0_flg, - vlan_removal_en); + start.mtu = edev->ndev->mtu; + start.vport_id = 0; + start.drop_ttl0 = true; + start.remove_inner_vlan = vlan_removal_en; + + rc = edev->ops->vport_start(cdev, &start); if (rc) { DP_ERR(edev, "Start V-PORT failed %d\n", rc); @@ -2490,7 +2493,7 @@ static int qede_start_queues(struct qede_dev *edev) DP_VERBOSE(edev, NETIF_MSG_IFUP, "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n", - vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); + start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en); for_each_rss(i) { struct qede_fastpath *fp = &edev->fp_array[i]; @@ -2555,7 +2558,7 @@ static int qede_start_queues(struct qede_dev *edev) /* Prepare and send the vport enable */ memset(&vport_update_params, 0, sizeof(vport_update_params)); - vport_update_params.vport_id = vport_id; + vport_update_params.vport_id = start.vport_id; vport_update_params.update_vport_active_flg = 1; vport_update_params.vport_active_flg = 1; diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index e53b0ca49e41..e1d69834a11f 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h @@ -39,6 +39,14 @@ struct qed_update_vport_params { struct qed_update_vport_rss_params rss_params; }; +struct qed_start_vport_params { + bool remove_inner_vlan; + bool gro_enable; + bool drop_ttl0; + u8 vport_id; + u16 mtu; +}; + struct qed_stop_rxq_params { u8 rss_id; u8 rx_queue_id; @@ -118,9 +126,7 @@ struct qed_eth_ops { void *cookie); int (*vport_start)(struct qed_dev *cdev, - u8 vport_id, u16 mtu, - u8 drop_ttl0_flg, - u8 inner_vlan_removal_en_flg); + struct qed_start_vport_params *params); int (*vport_stop)(struct qed_dev *cdev, u8 vport_id); -- cgit v1.2.3 From 55482edc25f0606851de42e73618f813f310d009 Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Fri, 4 Mar 2016 12:35:06 -0500 Subject: qede: Add slowpath/fastpath support and enable hardware GRO This patch 
configures hardware to use GRO and adds support for fastpath APIs to handle HW aggregated packets. Signed-off-by: Yuval Mintz Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qede/qede.h | 22 ++ drivers/net/ethernet/qlogic/qede/qede_main.c | 367 ++++++++++++++++++++++++++- 2 files changed, 388 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 02e17d331f22..d023251544d9 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -160,6 +160,7 @@ struct qede_dev { u16 q_num_rx_buffers; /* Must be a power of two */ u16 q_num_tx_buffers; /* Must be a power of two */ + bool gro_disable; struct list_head vlan_list; u16 configured_vlans; u16 non_configured_vlans; @@ -188,6 +189,24 @@ struct sw_rx_data { unsigned int page_offset; }; +enum qede_agg_state { + QEDE_AGG_STATE_NONE = 0, + QEDE_AGG_STATE_START = 1, + QEDE_AGG_STATE_ERROR = 2 +}; + +struct qede_agg_info { + struct sw_rx_data replace_buf; + dma_addr_t replace_buf_mapping; + struct sw_rx_data start_buf; + dma_addr_t start_buf_mapping; + struct eth_fast_path_rx_tpa_start_cqe start_cqe; + enum qede_agg_state agg_state; + struct sk_buff *skb; + int frag_id; + u16 vlan_tag; +}; + struct qede_rx_queue { __le16 *hw_cons_ptr; struct sw_rx_data *sw_rx_ring; @@ -197,6 +216,9 @@ struct qede_rx_queue { struct qed_chain rx_comp_ring; void __iomem *hw_rxq_prod_addr; + /* GRO */ + struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; + int rx_buf_size; unsigned int rx_buf_seg_size; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index f75f334af7bd..572862564ab6 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -866,6 +866,278 @@ static inline void qede_skb_receive(struct qede_dev *edev, napi_gro_receive(&fp->napi, skb); } +static void qede_set_gro_params(struct qede_dev *edev, + struct sk_buff *skb, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); + + if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & + PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + + skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - + cqe->header_len; +} + +static int qede_fill_frag_skb(struct qede_dev *edev, + struct qede_rx_queue *rxq, + u8 tpa_agg_index, + u16 len_on_bd) +{ + struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & + NUM_RX_BDS_MAX]; + struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; + struct sk_buff *skb = tpa_info->skb; + + if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START)) + goto out; + + /* Add one frag and update the appropriate fields in the skb */ + skb_fill_page_desc(skb, tpa_info->frag_id++, + current_bd->data, current_bd->page_offset, + len_on_bd); + + if (unlikely(qede_realloc_rx_buffer(edev, rxq, current_bd))) { + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + goto out; + } + + qed_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; + + skb->data_len += len_on_bd; + skb->truesize += rxq->rx_buf_seg_size; + skb->len += len_on_bd; + + return 0; + +out: + return -ENOMEM; +} + +static void qede_tpa_start(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_start_cqe *cqe) +{ + struct qede_agg_info *tpa_info = 
&rxq->tpa_info[cqe->tpa_agg_index]; + struct eth_rx_bd *rx_bd_cons = qed_chain_consume(&rxq->rx_bd_ring); + struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); + struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + dma_addr_t mapping = tpa_info->replace_buf_mapping; + struct sw_rx_data *sw_rx_data_cons; + struct sw_rx_data *sw_rx_data_prod; + enum pkt_hash_types rxhash_type; + u32 rxhash; + + sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; + sw_rx_data_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; + + /* Use pre-allocated replacement buffer - we can't release the agg. + * start until its over and we don't want to risk allocation failing + * here, so re-allocate when aggregation will be over. + */ + dma_unmap_addr_set(sw_rx_data_prod, mapping, + dma_unmap_addr(replace_buf, mapping)); + + sw_rx_data_prod->data = replace_buf->data; + rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping)); + rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(mapping)); + sw_rx_data_prod->page_offset = replace_buf->page_offset; + + rxq->sw_rx_prod++; + + /* move partial skb from cons to pool (don't unmap yet) + * save mapping, incase we drop the packet later on. + */ + tpa_info->start_buf = *sw_rx_data_cons; + mapping = HILO_U64(le32_to_cpu(rx_bd_cons->addr.hi), + le32_to_cpu(rx_bd_cons->addr.lo)); + + tpa_info->start_buf_mapping = mapping; + rxq->sw_rx_cons++; + + /* set tpa state to start only if we are able to allocate skb + * for this aggregation, otherwise mark as error and aggregation will + * be dropped + */ + tpa_info->skb = netdev_alloc_skb(edev->ndev, + le16_to_cpu(cqe->len_on_first_bd)); + if (unlikely(!tpa_info->skb)) { + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + return; + } + + skb_put(tpa_info->skb, le16_to_cpu(cqe->len_on_first_bd)); + memcpy(&tpa_info->start_cqe, cqe, sizeof(tpa_info->start_cqe)); + + /* Start filling in the aggregation info */ + tpa_info->frag_id = 0; + tpa_info->agg_state = QEDE_AGG_STATE_START; + + rxhash = qede_get_rxhash(edev, cqe->bitfields, + cqe->rss_hash, &rxhash_type); + skb_set_hash(tpa_info->skb, rxhash, rxhash_type); + if ((le16_to_cpu(cqe->pars_flags.flags) >> + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) & + PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK) + tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); + else + tpa_info->vlan_tag = 0; + + /* This is needed in order to enable forwarding support */ + qede_set_gro_params(edev, tpa_info->skb, cqe); + + if (likely(cqe->ext_bd_len_list[0])) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->ext_bd_len_list[0])); + + if (unlikely(cqe->ext_bd_len_list[1])) { + DP_ERR(edev, + "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); + tpa_info->agg_state = QEDE_AGG_STATE_ERROR; + } +} + +static void qede_gro_ip_csum(struct sk_buff *skb) +{ + const struct iphdr *iph = ip_hdr(skb); + struct tcphdr *th; + + skb_set_network_header(skb, 0); + skb_set_transport_header(skb, sizeof(struct iphdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), + iph->saddr, iph->daddr, 0); + + tcp_gro_complete(skb); +} + +static void qede_gro_ipv6_csum(struct sk_buff *skb) +{ + struct ipv6hdr *iph = ipv6_hdr(skb); + struct tcphdr *th; + + skb_set_network_header(skb, 0); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + th = tcp_hdr(skb); + + th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), + &iph->saddr, &iph->daddr, 0); + tcp_gro_complete(skb); +} + +static void 
qede_gro_receive(struct qede_dev *edev, + struct qede_fastpath *fp, + struct sk_buff *skb, + u16 vlan_tag) +{ + if (skb_shinfo(skb)->gso_size) { + switch (skb->protocol) { + case htons(ETH_P_IP): + qede_gro_ip_csum(skb); + break; + case htons(ETH_P_IPV6): + qede_gro_ipv6_csum(skb); + break; + default: + DP_ERR(edev, + "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n", + ntohs(skb->protocol)); + } + } + + skb_record_rx_queue(skb, fp->rss_id); + qede_skb_receive(edev, fp, skb, vlan_tag); +} + +static inline void qede_tpa_cont(struct qede_dev *edev, + struct qede_rx_queue *rxq, + struct eth_fast_path_rx_tpa_cont_cqe *cqe) +{ + int i; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA cont with more than a single len_list entry\n"); +} + +static void qede_tpa_end(struct qede_dev *edev, + struct qede_fastpath *fp, + struct eth_fast_path_rx_tpa_end_cqe *cqe) +{ + struct qede_rx_queue *rxq = fp->rxq; + struct qede_agg_info *tpa_info; + struct sk_buff *skb; + int i; + + tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; + skb = tpa_info->skb; + + for (i = 0; cqe->len_list[i]; i++) + qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, + le16_to_cpu(cqe->len_list[i])); + if (unlikely(i > 1)) + DP_ERR(edev, + "Strange - TPA emd with more than a single len_list entry\n"); + + if (unlikely(tpa_info->agg_state != QEDE_AGG_STATE_START)) + goto err; + + /* Sanity */ + if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) + DP_ERR(edev, + "Strange - TPA had %02x BDs, but SKB has only %d frags\n", + cqe->num_of_bds, tpa_info->frag_id); + if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) + DP_ERR(edev, + "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", + le16_to_cpu(cqe->total_packet_len), skb->len); + + memcpy(skb->data, + page_address(tpa_info->start_buf.data) + + tpa_info->start_cqe.placement_offset + + tpa_info->start_buf.page_offset, + le16_to_cpu(tpa_info->start_cqe.len_on_first_bd)); + + /* Recycle [mapped] start buffer for the next replacement */ + tpa_info->replace_buf = tpa_info->start_buf; + tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping; + + /* Finalize the SKB */ + skb->protocol = eth_type_trans(skb, edev->ndev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); + + qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); + + tpa_info->agg_state = QEDE_AGG_STATE_NONE; + + return; +err: + /* The BD starting the aggregation is still mapped; Re-use it for + * future aggregations [as replacement buffer] + */ + memcpy(&tpa_info->replace_buf, &tpa_info->start_buf, + sizeof(struct sw_rx_data)); + tpa_info->replace_buf_mapping = tpa_info->start_buf_mapping; + tpa_info->start_buf.data = NULL; + tpa_info->agg_state = QEDE_AGG_STATE_NONE; + dev_kfree_skb_any(tpa_info->skb); + tpa_info->skb = NULL; +} + static u8 qede_check_csum(u16 flag) { u16 csum_flag = 0; @@ -931,6 +1203,25 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) goto next_cqe; } + if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { + switch (cqe_type) { + case ETH_RX_CQE_TYPE_TPA_START: + qede_tpa_start(edev, rxq, + &cqe->fast_path_tpa_start); + goto next_cqe; + case ETH_RX_CQE_TYPE_TPA_CONT: + qede_tpa_cont(edev, rxq, + &cqe->fast_path_tpa_cont); + goto next_cqe; + case ETH_RX_CQE_TYPE_TPA_END: + qede_tpa_end(edev, 
fp, + &cqe->fast_path_tpa_end); + goto next_rx_only; + default: + break; + } + } + /* Get the data from the SW ring */ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX; sw_rx_data = &rxq->sw_rx_ring[sw_rx_index]; @@ -1057,9 +1348,9 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget) qede_skb_receive(edev, fp, skb, le16_to_cpu(fp_cqe->vlan_tag)); qed_chain_consume(&rxq->rx_bd_ring); - next_rx: rxq->sw_rx_cons++; +next_rx_only: rx_pkt++; next_cqe: /* don't consume bd rx buffer */ @@ -1952,9 +2243,31 @@ static void qede_free_rx_buffers(struct qede_dev *edev, } } +static void qede_free_sge_mem(struct qede_dev *edev, + struct qede_rx_queue *rxq) { + int i; + + if (edev->gro_disable) + return; + + for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { + struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; + struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + + if (replace_buf) { + dma_unmap_page(&edev->pdev->dev, + dma_unmap_addr(replace_buf, mapping), + PAGE_SIZE, DMA_FROM_DEVICE); + __free_page(replace_buf->data); + } + } +} + static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) { + qede_free_sge_mem(edev, rxq); + /* Free rx buffers */ qede_free_rx_buffers(edev, rxq); @@ -2010,6 +2323,53 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev, return 0; } +static int qede_alloc_sge_mem(struct qede_dev *edev, + struct qede_rx_queue *rxq) +{ + dma_addr_t mapping; + int i; + + if (edev->gro_disable) + return 0; + + if (edev->ndev->mtu > PAGE_SIZE) { + edev->gro_disable = 1; + return 0; + } + + for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { + struct qede_agg_info *tpa_info = &rxq->tpa_info[i]; + struct sw_rx_data *replace_buf = &tpa_info->replace_buf; + + replace_buf->data = alloc_pages(GFP_ATOMIC, 0); + if (unlikely(!replace_buf->data)) { + DP_NOTICE(edev, + "Failed to allocate TPA skb pool [replacement buffer]\n"); + goto err; + } + + mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, + rxq->rx_buf_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { + DP_NOTICE(edev, + "Failed to map TPA replacement buffer\n"); + goto err; + } + + dma_unmap_addr_set(replace_buf, mapping, mapping); + tpa_info->replace_buf.page_offset = 0; + + tpa_info->replace_buf_mapping = mapping; + tpa_info->agg_state = QEDE_AGG_STATE_NONE; + } + + return 0; +err: + qede_free_sge_mem(edev, rxq); + edev->gro_disable = 1; + return -ENOMEM; +} + /* This function allocates all memory needed per Rx queue */ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq) @@ -2071,6 +2431,8 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, num_allocated); } + qede_alloc_sge_mem(edev, rxq); + return 0; err: @@ -2233,6 +2595,8 @@ static void qede_init_fp(struct qede_dev *edev) snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", edev->ndev->name, rss_id); } + + edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO); } static int qede_set_real_num_queues(struct qede_dev *edev) @@ -2479,6 +2843,7 @@ static int qede_start_queues(struct qede_dev *edev) return -EINVAL; } + start.gro_enable = !edev->gro_disable; start.mtu = edev->ndev->mtu; start.vport_id = 0; start.drop_ttl0 = true; -- cgit v1.2.3 From d66ab51442211158b677c2f12310c314d9587f74 Mon Sep 17 00:00:00 2001 From: Haiyang Zhang Date: Fri, 4 Mar 2016 15:07:43 -0800 Subject: hv_netvsc: Move subchannel waiting to rndis_filter_device_remove() During hot add, vmbus_device_register() is called from vmbus_onoffer(), on the same workqueue as the subchannel offer message work-queue, so 
subchannel offer won't be processed until the vmbus_device_register()/... /netvsc_probe() is done. Also, vmbus_device_register() is called with channel_mutex locked, which prevents subchannel processing too. So the "waiting for sub-channel processing" will not success in hot add case. But, in usual module loading, the netvsc_probe() is called from different code path, and doesn't fail. This patch resolves the deadlock during NIC hot-add, and speeds up NIC loading time. Signed-off-by: Haiyang Zhang Reviewed-by: K. Y. Srinivasan Signed-off-by: David S. Miller --- drivers/net/hyperv/rndis_filter.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index a37bbda37ffa..47d07c576a34 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -1175,22 +1175,18 @@ int rndis_filter_device_add(struct hv_device *dev, ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn); /* - * Wait for the host to send us the sub-channel offers. + * Set the number of sub-channels to be received. */ spin_lock_irqsave(&net_device->sc_lock, flags); sc_delta = num_rss_qs - (net_device->num_chn - 1); net_device->num_sc_offered -= sc_delta; spin_unlock_irqrestore(&net_device->sc_lock, flags); - while (net_device->num_sc_offered != 0) { - t = wait_for_completion_timeout(&net_device->channel_init_wait, 10*HZ); - if (t == 0) - WARN(1, "Netvsc: Waiting for sub-channel processing"); - } out: if (ret) { net_device->max_chn = 1; net_device->num_chn = 1; + net_device->num_sc_offered = 0; } return 0; /* return 0 because primary channel can be used alone */ @@ -1204,6 +1200,17 @@ void rndis_filter_device_remove(struct hv_device *dev) { struct netvsc_device *net_dev = hv_get_drvdata(dev); struct rndis_device *rndis_dev = net_dev->extension; + unsigned long t; + + /* If not all subchannel offers are complete, wait for them until + * completion to avoid race. + */ + while (net_dev->num_sc_offered > 0) { + t = wait_for_completion_timeout(&net_dev->channel_init_wait, + 10 * HZ); + if (t == 0) + WARN(1, "Netvsc: Waiting for sub-channel processing"); + } /* Halt and release the rndis device */ rndis_filter_halt_device(rndis_dev); -- cgit v1.2.3 From 73fb270592164b1917442f8bff4c791d095ee2ef Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Thu, 25 Feb 2016 11:03:01 -0600 Subject: rtlwifi: Fix size of wireless mode variable Smatch reports the following warning: CHECK drivers/net/wireless/realtek/rtlwifi/rc.c drivers/net/wireless/realtek/rtlwifi/rc.c:144 _rtl_rc_rate_set_series() warn: impossible condition '(wireless_mode == 256) => (0-255 == 256)' This warning arises because commit acc6907b87a9 ("rtlwifi: Fix warning from ieee80211_get_tx_rates() when using 5G") now checks the wireless mode for WIRELESS_MODE_AC_ONLY (BIT(8)) in _rtl_rc_rate_set_series(). As a result, all quantities used to store the wireless mode must be u16. This patch also reorders struct rtl_sta_info to save a little space. 
Fixes: d76d65fd2695 ("rtlwifi: fix broken VHT support") Reported-by: Dan Williams Signed-off-by: Larry Finger Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtlwifi/rc.c | 4 ++-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 2 +- drivers/net/wireless/realtek/rtlwifi/wifi.h | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c index 28f7010e7108..1aca77719521 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rc.c +++ b/drivers/net/wireless/realtek/rtlwifi/rc.c @@ -41,7 +41,7 @@ static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv, struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_sta_info *sta_entry = NULL; - u8 wireless_mode = 0; + u16 wireless_mode = 0; /* *this rate is no use for true rate, firmware @@ -99,7 +99,7 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv, { struct rtl_mac *mac = rtl_mac(rtlpriv); struct rtl_sta_info *sta_entry = NULL; - u8 wireless_mode = 0; + u16 wireless_mode = 0; u8 sgi_20 = 0, sgi_40 = 0, sgi_80 = 0; if (sta) { diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 5da9bd0e5002..fe900badd468 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c @@ -3837,7 +3837,7 @@ void rtl8821ae_update_channel_access_setting(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); - u8 wireless_mode = mac->mode; + u16 wireless_mode = mac->mode; u8 sifs_timer, r2t_sifs; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME, diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index b07b364b93ab..554d81420f19 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -1318,14 +1318,13 @@ struct rtl_tid_data { struct rtl_sta_info { struct list_head list; - u8 ratr_index; - u8 wireless_mode; - u8 mimo_ps; - u8 mac_addr[ETH_ALEN]; struct rtl_tid_data tids[MAX_TID_COUNT]; - /* just used for ap adhoc or mesh*/ struct rssi_sta rssi_stat; + u16 wireless_mode; + u8 ratr_index; + u8 mimo_ps; + u8 mac_addr[ETH_ALEN]; } __packed; struct rtl_priv; -- cgit v1.2.3 From 836856e3bd61d0644e5178a2c1b51d90459e2788 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 2 Mar 2016 16:59:02 +0100 Subject: wireless: cw1200: use __maybe_unused to hide pm functions_ The cw1200 uses #ifdef to check for CONFIG_PM, but then uses SIMPLE_DEV_PM_OPS, which leaves the references out when CONFIG_PM_SLEEP is not defined, so we get a warning with PM=y && PM_SLEEP=n: drivers/net/wireless/st/cw1200/cw1200_spi.c:450:12: error: 'cw1200_spi_suspend' defined but not used [-Werror=unused-function] This removes the incorrect #ifdef and instead uses a __maybe_unused annotation to let the compiler know it can silently drop the function definition. For the DEV_PM_OPS definition, we can use an IS_ENABLED() check to avoid defining the structure when CONFIG_PM is not set without the #ifdef. 
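Reduced to its essentials, the pattern looks like this (the foo_* names are placeholders, not cw1200 code): the callback stays outside any #ifdef but is marked __maybe_unused so the compiler may silently discard it, and the pm pointer is wired up only when CONFIG_PM is enabled:

#include <linux/pm.h>
#include <linux/spi/spi.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	/* device-specific "safe to suspend?" checks would go here */
	return 0;
}

/* Always defined; the suspend hook is only referenced when PM_SLEEP is on */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, NULL);

static struct spi_driver foo_spi_driver = {
	/* .probe / .remove omitted from this sketch */
	.driver = {
		.name	= "foo_spi",
		.pm	= IS_ENABLED(CONFIG_PM) ? &foo_pm_ops : NULL,
	},
};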
Signed-off-by: Arnd Bergmann Signed-off-by: Kalle Valo --- drivers/net/wireless/st/cw1200/cw1200_spi.c | 9 ++------- drivers/net/wireless/st/cw1200/pm.h | 9 +++++++-- 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/st/cw1200/cw1200_spi.c b/drivers/net/wireless/st/cw1200/cw1200_spi.c index a740083634d8..63f95e9c2992 100644 --- a/drivers/net/wireless/st/cw1200/cw1200_spi.c +++ b/drivers/net/wireless/st/cw1200/cw1200_spi.c @@ -446,8 +446,7 @@ static int cw1200_spi_disconnect(struct spi_device *func) return 0; } -#ifdef CONFIG_PM -static int cw1200_spi_suspend(struct device *dev) +static int __maybe_unused cw1200_spi_suspend(struct device *dev) { struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); @@ -460,16 +459,12 @@ static int cw1200_spi_suspend(struct device *dev) static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL); -#endif - static struct spi_driver spi_driver = { .probe = cw1200_spi_probe, .remove = cw1200_spi_disconnect, .driver = { .name = "cw1200_wlan_spi", -#ifdef CONFIG_PM - .pm = &cw1200_pm_ops, -#endif + .pm = IS_ENABLED(CONFIG_PM) ? &cw1200_pm_ops : NULL, }, }; diff --git a/drivers/net/wireless/st/cw1200/pm.h b/drivers/net/wireless/st/cw1200/pm.h index 3ed90ff22bb8..534548470ebc 100644 --- a/drivers/net/wireless/st/cw1200/pm.h +++ b/drivers/net/wireless/st/cw1200/pm.h @@ -31,13 +31,18 @@ int cw1200_pm_init(struct cw1200_pm_state *pm, void cw1200_pm_deinit(struct cw1200_pm_state *pm); int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); -int cw1200_wow_resume(struct ieee80211_hw *hw); int cw1200_can_suspend(struct cw1200_common *priv); +int cw1200_wow_resume(struct ieee80211_hw *hw); void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, unsigned long tmo); #else static inline void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, - unsigned long tmo) { + unsigned long tmo) +{ +} +static inline int cw1200_can_suspend(struct cw1200_common *priv) +{ + return 0; } #endif #endif -- cgit v1.2.3 From 14ca0751c96f8d3d0f52e8ed3b3236f8b34d3460 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 4 Mar 2016 15:15:06 +0100 Subject: bpf: support for access to tunnel options After eBPF being able to programmatically access/manage tunnel key meta data via commit d3aa45ce6b94 ("bpf: add helpers to access tunnel metadata") and more recently also for IPv6 through c6c33454072f ("bpf: support ipv6 for bpf_skb_{set,get}_tunnel_key"), this work adds two complementary helpers to generically access their auxiliary tunnel options. Geneve and vxlan support this facility. For geneve, TLVs can be pushed, and for the vxlan case its GBP extension. I.e. setting tunnel key for geneve case only makes sense, if we can also read/write TLVs into it. In the GBP case, it provides the flexibility to easily map the group policy ID in combination with other helpers or maps. I chose to model this as two separate helpers, bpf_skb_{set,get}_tunnel_opt(), for a couple of reasons. bpf_skb_{set,get}_tunnel_key() is already rather complex by itself, and there may be cases for tunnel key backends where tunnel options are not always needed. If we would have integrated this into bpf_skb_{set,get}_tunnel_key() nevertheless, we are very limited with remaining helper arguments, so keeping compatibility on structs in case of passing in a flat buffer gets more cumbersome. Separating both also allows for more flexibility and future extensibility, f.e. options could be fed directly from a map, etc. 
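As a rough idea of how the two helpers are meant to be used together from a tc classifier (a sketch only; the wrapper declarations are assumed to come from the kernel's BPF sample headers, and the VNI, remote address and group policy ID below are invented for illustration):

#include <uapi/linux/bpf.h>
#include <uapi/linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("vxlan_set_tunnel")
int _vxlan_set_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};
	__u32 gbp = 0x800FF;			/* example VXLAN-GBP policy word */

	key.remote_ipv4 = 0xac100164;		/* 172.16.1.100, host byte order */
	key.tunnel_id	= 2;			/* VNI */
	key.tunnel_tos	= 0;
	key.tunnel_ttl	= 64;

	/* Set the key first, then attach the GBP word as tunnel option data */
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;
	if (bpf_skb_set_tunnel_opt(skb, &gbp, sizeof(gbp)) < 0)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

On receive, the mirror helper bpf_skb_get_tunnel_opt(skb, &gbp, sizeof(gbp)) returns the option length on success, so the same word can be read back and, for example, used as a map key.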
Moreover, change geneve's xmit path to test only for info->options_len instead of TUNNEL_GENEVE_OPT flag. This makes it more consistent with vxlan's xmit path and allows for avoiding to specify a protocol flag in the API on xmit, so it can be protocol agnostic. Having info->options_len is enough information that is needed. Tested with vxlan and geneve. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller --- drivers/net/geneve.c | 4 +-- include/uapi/linux/bpf.h | 11 +++++++ net/core/filter.c | 83 ++++++++++++++++++++++++++++++++++++++++++++---- 3 files changed, 90 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index bc5da357e16d..36db4cf0579c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -940,7 +940,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, u8 vni[3]; tunnel_id_to_vni(key->tun_id, vni); - if (key->tun_flags & TUNNEL_GENEVE_OPT) + if (info->options_len) opts = ip_tunnel_info_opts(info); if (key->tun_flags & TUNNEL_CSUM) @@ -1027,7 +1027,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, u8 vni[3]; tunnel_id_to_vni(key->tun_id, vni); - if (key->tun_flags & TUNNEL_GENEVE_OPT) + if (info->options_len) opts = ip_tunnel_info_opts(info); if (key->tun_flags & TUNNEL_CSUM) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 21ee6d52016f..9221f653fee3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -298,6 +298,17 @@ enum bpf_func_id { * Return: csum result */ BPF_FUNC_csum_diff, + + /** + * bpf_skb_[gs]et_tunnel_opt(skb, opt, size) + * retrieve or populate tunnel options metadata + * @skb: pointer to skb + * @opt: pointer to raw tunnel option data + * @size: size of @opt + * Return: 0 on success for set, option size for get + */ + BPF_FUNC_skb_get_tunnel_opt, + BPF_FUNC_skb_set_tunnel_opt, __BPF_FUNC_MAX_ID, }; diff --git a/net/core/filter.c b/net/core/filter.c index 6c9d15561d04..012a10c2da94 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1809,6 +1809,32 @@ static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { .arg4_type = ARG_ANYTHING, }; +static u64 bpf_skb_get_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5) +{ + struct sk_buff *skb = (struct sk_buff *) (long) r1; + u8 *to = (u8 *) (long) r2; + const struct ip_tunnel_info *info = skb_tunnel_info(skb); + + if (unlikely(!info || + !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) + return -ENOENT; + if (unlikely(size < info->options_len)) + return -ENOMEM; + + ip_tunnel_info_opts_get(to, info); + + return info->options_len; +} + +static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { + .func = bpf_skb_get_tunnel_opt, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_STACK, + .arg3_type = ARG_CONST_STACK_SIZE, +}; + static struct metadata_dst __percpu *md_dst; static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) @@ -1875,17 +1901,58 @@ static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { .arg4_type = ARG_ANYTHING, }; -static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void) +#define BPF_TUNLEN_MAX 255 + +static u64 bpf_skb_set_tunnel_opt(u64 r1, u64 r2, u64 size, u64 r4, u64 r5) +{ + struct sk_buff *skb = (struct sk_buff *) (long) r1; + u8 *from = (u8 *) (long) r2; + struct ip_tunnel_info *info = skb_tunnel_info(skb); + const struct metadata_dst *md = 
this_cpu_ptr(md_dst); + + if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) + return -EINVAL; + if (unlikely(size > BPF_TUNLEN_MAX)) + return -ENOMEM; + + ip_tunnel_info_opts_set(info, from, size); + + return 0; +} + +static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { + .func = bpf_skb_set_tunnel_opt, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_PTR_TO_STACK, + .arg3_type = ARG_CONST_STACK_SIZE, +}; + +static const struct bpf_func_proto * +bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) { if (!md_dst) { - /* race is not possible, since it's called from - * verifier that is holding verifier mutex + BUILD_BUG_ON(FIELD_SIZEOF(struct ip_tunnel_info, + options_len) != 1); + + /* Race is not possible, since it's called from verifier + * that is holding verifier mutex. */ - md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL); + md_dst = metadata_dst_alloc_percpu(BPF_TUNLEN_MAX, + GFP_KERNEL); if (!md_dst) return NULL; } - return &bpf_skb_set_tunnel_key_proto; + + switch (which) { + case BPF_FUNC_skb_set_tunnel_key: + return &bpf_skb_set_tunnel_key_proto; + case BPF_FUNC_skb_set_tunnel_opt: + return &bpf_skb_set_tunnel_opt_proto; + default: + return NULL; + } } static const struct bpf_func_proto * @@ -1939,7 +2006,11 @@ tc_cls_act_func_proto(enum bpf_func_id func_id) case BPF_FUNC_skb_get_tunnel_key: return &bpf_skb_get_tunnel_key_proto; case BPF_FUNC_skb_set_tunnel_key: - return bpf_get_skb_set_tunnel_key_proto(); + return bpf_get_skb_set_tunnel_proto(func_id); + case BPF_FUNC_skb_get_tunnel_opt: + return &bpf_skb_get_tunnel_opt_proto; + case BPF_FUNC_skb_set_tunnel_opt: + return bpf_get_skb_set_tunnel_proto(func_id); case BPF_FUNC_redirect: return &bpf_redirect_proto; case BPF_FUNC_get_route_realm: -- cgit v1.2.3 From db3c6139e6ead91b42e7c2ad044ed8beaee884e6 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 4 Mar 2016 15:15:07 +0100 Subject: bpf, vxlan, geneve, gre: fix usage of dst_cache on xmit The assumptions from commit 0c1d70af924b ("net: use dst_cache for vxlan device"), 468dfffcd762 ("geneve: add dst caching support") and 3c1cb4d2604c ("net/ipv4: add dst cache support for gre lwtunnels") on dst_cache usage when ip_tunnel_info is used is unfortunately not always valid as assumed. While it seems correct for ip_tunnel_info front-ends such as OVS, eBPF however can fill in ip_tunnel_info for consumers like vxlan, geneve or gre with different remote dsts, tos, etc, therefore they cannot be assumed as packet independent. Right now vxlan, geneve, gre would cache the dst for eBPF and every packet would reuse the same entry that was first created on the initial route lookup. eBPF doesn't store/cache the ip_tunnel_info, so each skb may have a different one. Fix it by adding a flag that checks the ip_tunnel_info. Also the !tos test in vxlan needs to be handeled differently in this context as it is currently inferred from ip_tunnel_info as well if present. ip_tunnel_dst_cache_usable() helper is added for the three tunnel cases, which checks if we can use dst cache. Fixes: 0c1d70af924b ("net: use dst_cache for vxlan device") Fixes: 468dfffcd762 ("geneve: add dst caching support") Fixes: 3c1cb4d2604c ("net/ipv4: add dst cache support for gre lwtunnels") Signed-off-by: Daniel Borkmann Acked-by: Paolo Abeni Acked-by: Hannes Frederic Sowa Signed-off-by: David S. 
Miller --- drivers/net/geneve.c | 6 ++---- drivers/net/vxlan.c | 24 ++++++++++++------------ include/net/ip_tunnels.h | 15 +++++++++++++++ net/core/filter.c | 2 +- net/ipv4/ip_gre.c | 10 ++++++---- 5 files changed, 36 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 36db4cf0579c..6a0cbbe03e5d 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -775,10 +775,10 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct flowi4 *fl4, struct ip_tunnel_info *info) { + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); struct dst_cache *dst_cache; struct rtable *rt = NULL; - bool use_cache = true; __u8 tos; memset(fl4, 0, sizeof(*fl4)); @@ -804,7 +804,6 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, dst_cache = &geneve->dst_cache; } - use_cache = use_cache && !skb->mark; if (use_cache) { rt = dst_cache_get_ip4(dst_cache, &fl4->saddr); if (rt) @@ -832,11 +831,11 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct flowi6 *fl6, struct ip_tunnel_info *info) { + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); struct geneve_sock *gs6 = geneve->sock6; struct dst_entry *dst = NULL; struct dst_cache *dst_cache; - bool use_cache = true; __u8 prio; memset(fl6, 0, sizeof(*fl6)); @@ -862,7 +861,6 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, dst_cache = &geneve->dst_cache; } - use_cache = use_cache && !skb->mark; if (use_cache) { dst = dst_cache_get_ip6(dst_cache, &fl6->saddr); if (dst) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index fc998a3bd234..7294a459b13c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1756,17 +1756,15 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, u8 tos, __be32 daddr, __be32 *saddr, struct dst_cache *dst_cache, - struct ip_tunnel_info *info) + const struct ip_tunnel_info *info) { + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct rtable *rt = NULL; - bool use_cache = false; struct flowi4 fl4; - /* when the ip_tunnel_info is availble, the tos used for lookup is - * packet independent, so we can use the cache - */ - if (!skb->mark && (!tos || info)) { - use_cache = true; + if (tos && !info) + use_cache = false; + if (use_cache) { rt = dst_cache_get_ip4(dst_cache, saddr); if (rt) return rt; @@ -1794,13 +1792,15 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, const struct in6_addr *daddr, struct in6_addr *saddr, - struct dst_cache *dst_cache) + struct dst_cache *dst_cache, + const struct ip_tunnel_info *info) { + bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct dst_entry *ndst; struct flowi6 fl6; int err; - if (!skb->mark) { + if (use_cache) { ndst = dst_cache_get_ip6(dst_cache, saddr); if (ndst) return ndst; @@ -1820,7 +1820,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, return ERR_PTR(err); *saddr = fl6.saddr; - if (!skb->mark) + if (use_cache) dst_cache_set_ip6(dst_cache, ndst, saddr); return ndst; } @@ -2018,7 +2018,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? 
rdst->remote_ifindex : 0, &dst->sin6.sin6_addr, &saddr, - dst_cache); + dst_cache, info); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", &dst->sin6.sin6_addr); @@ -2387,7 +2387,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; ndst = vxlan6_get_route(vxlan, skb, 0, &info->key.u.ipv6.dst, - &info->key.u.ipv6.src, NULL); + &info->key.u.ipv6.src, NULL, info); if (IS_ERR(ndst)) return PTR_ERR(ndst); dst_release(ndst); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 5f28b606633e..e1395d70fb48 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -140,6 +140,7 @@ struct ip_tunnel { #define TUNNEL_CRIT_OPT __cpu_to_be16(0x0400) #define TUNNEL_GENEVE_OPT __cpu_to_be16(0x0800) #define TUNNEL_VXLAN_OPT __cpu_to_be16(0x1000) +#define TUNNEL_NOCACHE __cpu_to_be16(0x2000) #define TUNNEL_OPTIONS_PRESENT (TUNNEL_GENEVE_OPT | TUNNEL_VXLAN_OPT) @@ -206,6 +207,20 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 0, sizeof(*key) - IP_TUNNEL_KEY_SIZE); } +static inline bool +ip_tunnel_dst_cache_usable(const struct sk_buff *skb, + const struct ip_tunnel_info *info) +{ + if (skb->mark) + return false; + if (!info) + return true; + if (info->key.tun_flags & TUNNEL_NOCACHE) + return false; + + return true; +} + static inline unsigned short ip_tunnel_info_af(const struct ip_tunnel_info *tun_info) { diff --git a/net/core/filter.c b/net/core/filter.c index 012a10c2da94..a66dc03c261f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1870,7 +1870,7 @@ static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) info = &md->u.tun_info; info->mode = IP_TUNNEL_INFO_TX; - info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM; + info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; if (flags & BPF_F_DONT_FRAGMENT) info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 202437d6087b..31936d387cfd 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -527,11 +527,12 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel_info *tun_info; const struct ip_tunnel_key *key; + struct rtable *rt = NULL; struct flowi4 fl; - struct rtable *rt; int min_headroom; int tunnel_hlen; __be16 df, flags; + bool use_cache; int err; tun_info = skb_tunnel_info(skb); @@ -540,13 +541,14 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev) goto err_free_skb; key = &tun_info->key; - rt = !skb->mark ? dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr) : - NULL; + use_cache = ip_tunnel_dst_cache_usable(skb, tun_info); + if (use_cache) + rt = dst_cache_get_ip4(&tun_info->dst_cache, &fl.saddr); if (!rt) { rt = gre_get_rt(skb, dev, &fl, key); if (IS_ERR(rt)) goto err_free_skb; - if (!skb->mark) + if (use_cache) dst_cache_set_ip4(&tun_info->dst_cache, &rt->dst, fl.saddr); } -- cgit v1.2.3 From 1400615d64cf5afee533aff8234c837da465841b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 4 Mar 2016 15:15:08 +0100 Subject: vxlan: allow setting ipv6 traffic class We can already do that for IPv4, but IPv6 support was missing. Add it for vxlan, so it can be used with collect metadata frontends. Signed-off-by: Daniel Borkmann Signed-off-by: David S. 
Miller --- drivers/net/vxlan.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 7294a459b13c..2399099e68cf 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1789,7 +1789,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, - struct sk_buff *skb, int oif, + struct sk_buff *skb, int oif, u8 tos, const struct in6_addr *daddr, struct in6_addr *saddr, struct dst_cache *dst_cache, @@ -1800,6 +1800,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct flowi6 fl6; int err; + if (tos && !info) + use_cache = false; if (use_cache) { ndst = dst_cache_get_ip6(dst_cache, saddr); if (ndst) @@ -1808,6 +1810,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_oif = oif; + fl6.flowi6_tos = RT_TOS(tos); fl6.daddr = *daddr; fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; fl6.flowi6_mark = skb->mark; @@ -2016,7 +2019,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, sk = vxlan->vn6_sock->sock->sk; ndst = vxlan6_get_route(vxlan, skb, - rdst ? rdst->remote_ifindex : 0, + rdst ? rdst->remote_ifindex : 0, tos, &dst->sin6.sin6_addr, &saddr, dst_cache, info); if (IS_ERR(ndst)) { @@ -2053,6 +2056,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (!info) udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); + tos = ip_tunnel_ecn_encap(tos, old_iph, skb); ttl = ttl ? : ip6_dst_hoplimit(ndst); skb_scrub_packet(skb, xnet); err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr), @@ -2062,8 +2066,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } udp_tunnel6_xmit_skb(ndst, sk, skb, dev, - &saddr, &dst->sin6.sin6_addr, - 0, ttl, src_port, dst_port, !udp_sum); + &saddr, &dst->sin6.sin6_addr, tos, ttl, + src_port, dst_port, !udp_sum); #endif } @@ -2385,7 +2389,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (!vxlan->vn6_sock) return -EINVAL; - ndst = vxlan6_get_route(vxlan, skb, 0, + ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, &info->key.u.ipv6.dst, &info->key.u.ipv6.src, NULL, info); if (IS_ERR(ndst)) -- cgit v1.2.3 From 27c4d578600c401c119c012a90920805fab05cc9 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:41 -0500 Subject: bnxt_en: Refactor bnxt_fw_to_ethtool_advertised_spds(). Include the conversion of pause bits and add one extra call layer so that the same refactored function can be reused to get the link partner advertisement bits. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 33 ++++++++++++++--------- 1 file changed, 20 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 84ea26d6f3ff..a6ee26a6b6bb 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -515,9 +515,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) return speed_mask; } -static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) { - u16 fw_speeds = link_info->auto_link_speeds; u32 speed_mask = 0; /* TODO: support 25GB, 40GB, 50GB with different cable type */ @@ -532,9 +531,28 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) speed_mask |= ADVERTISED_10000baseT_Full; if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) speed_mask |= ADVERTISED_40000baseCR4_Full; + + if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH) + speed_mask |= ADVERTISED_Pause; + else if (fw_pause & BNXT_LINK_PAUSE_TX) + speed_mask |= ADVERTISED_Asym_Pause; + else if (fw_pause & BNXT_LINK_PAUSE_RX) + speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; + return speed_mask; } +static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->auto_link_speeds; + u8 fw_pause = 0; + + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + fw_pause = link_info->auto_pause_setting; + + return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); +} + u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) { switch (fw_link_speed) { @@ -580,17 +598,6 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->autoneg = AUTONEG_DISABLE; cmd->advertising = 0; } - if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) { - if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == - BNXT_LINK_PAUSE_BOTH) { - cmd->advertising |= ADVERTISED_Pause; - } else { - cmd->advertising |= ADVERTISED_Asym_Pause; - if (link_info->auto_pause_setting & - BNXT_LINK_PAUSE_RX) - cmd->advertising |= ADVERTISED_Pause; - } - } cmd->port = PORT_NONE; if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { -- cgit v1.2.3 From 3277360eb29c6e482391975717d983060ecbd28d Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:42 -0500 Subject: bnxt_en: Add reporting of link partner advertisement. And report actual pause settings to ETHTOOL_GPAUSEPARAM to let ethtool resolve the actual pause settings. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 3 +++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 ++ drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 20 ++++++++++++++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 4dfc25f042e8..b740489b6a5c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4468,6 +4468,7 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->pause = resp->pause; link_info->auto_mode = resp->auto_mode; link_info->auto_pause_setting = resp->auto_pause; + link_info->lp_pause = resp->link_partner_adv_pause; link_info->force_pause_setting = resp->force_pause; link_info->duplex_setting = resp->duplex; if (link_info->phy_link_status == BNXT_LINK_LINK) @@ -4478,6 +4479,8 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed); link_info->support_speeds = le16_to_cpu(resp->support_speeds); link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask); + link_info->lp_auto_link_speeds = + le16_to_cpu(resp->link_partner_adv_speeds); link_info->preemphasis = le32_to_cpu(resp->preemphasis); link_info->phy_ver[0] = resp->phy_maj; link_info->phy_ver[1] = resp->phy_min; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 9aa38f57601b..2f24e4e09b3d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -780,6 +780,7 @@ struct bnxt_link_info { #define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX #define BNXT_LINK_PAUSE_BOTH (PORT_PHY_QCFG_RESP_PAUSE_RX | \ PORT_PHY_QCFG_RESP_PAUSE_TX) + u8 lp_pause; u8 auto_pause_setting; u8 force_pause_setting; u8 duplex_setting; @@ -814,6 +815,7 @@ struct bnxt_link_info { #define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB #define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB #define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB + u16 lp_auto_link_speeds; u16 auto_link_speed; u16 force_link_speed; u32 preemphasis; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index a6ee26a6b6bb..591c290f6588 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -553,6 +553,17 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info) return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); } +static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->lp_auto_link_speeds; + u8 fw_pause = 0; + + if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) + fw_pause = link_info->lp_pause; + + return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); +} + u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) { switch (fw_link_speed) { @@ -594,6 +605,9 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) bnxt_fw_to_ethtool_advertised_spds(link_info); cmd->advertising |= ADVERTISED_Autoneg; cmd->autoneg = AUTONEG_ENABLE; + if (link_info->phy_link_status == BNXT_LINK_LINK) + cmd->lp_advertising = + bnxt_fw_to_ethtool_lp_adv(link_info); } else { cmd->autoneg = AUTONEG_DISABLE; cmd->advertising = 0; @@ -757,8 +771,10 @@ static void bnxt_get_pauseparam(struct net_device *dev, if (BNXT_VF(bp)) 
return; epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL); - epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); - epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); + epause->rx_pause = + ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_RX) != 0); + epause->tx_pause = + ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_TX) != 0); } static int bnxt_set_pauseparam(struct net_device *dev, -- cgit v1.2.3 From 4b32cacca28fe8b29bf266feff19b6fc2180402e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:43 -0500 Subject: bnxt_en: Use common function to get ethtool supported flags. The supported bits and advertising bits in ethtool have the same definitions. The same is true for the firmware bits. So use the common function to handle the conversion for both supported and advertising bits. v2: Don't use parentheses on function return. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 29 +++++++---------------- 1 file changed, 9 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 591c290f6588..ac9d6e5bc585 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -496,25 +496,6 @@ static void bnxt_get_drvinfo(struct net_device *dev, kfree(pkglog); } -static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) -{ - u16 fw_speeds = link_info->support_speeds; - u32 speed_mask = 0; - - if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB) - speed_mask |= SUPPORTED_100baseT_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB) - speed_mask |= SUPPORTED_1000baseT_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB) - speed_mask |= SUPPORTED_2500baseX_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) - speed_mask |= SUPPORTED_10000baseT_Full; - if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) - speed_mask |= SUPPORTED_40000baseCR4_Full; - - return speed_mask; -} - static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause) { u32 speed_mask = 0; @@ -564,6 +545,15 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info) return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause); } +static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info) +{ + u16 fw_speeds = link_info->support_speeds; + u32 supported; + + supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0); + return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause; +} + u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed) { switch (fw_link_speed) { @@ -595,7 +585,6 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) u16 ethtool_speed; cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); - cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; if (link_info->auto_link_speeds) cmd->supported |= SUPPORTED_Autoneg; -- cgit v1.2.3 From f1a082a6f79fd5f06b27ef05a5ba7ec8d6e83b4c Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:44 -0500 Subject: bnxt_en: Extend autoneg to all speeds. Allow all autoneg speeds aupported by firmware to be advertised. If the advertising parameter is 0, then all supported speeds will be advertised. Remove BNXT_ALL_COPPER_ETHTOOL_SPEED which is no longer used as all supported speeds can be advertised. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 4 ---- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 14 ++++---------- 2 files changed, 4 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 2f24e4e09b3d..c4424b60d859 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -757,10 +757,6 @@ struct bnxt_ntuple_filter { #define BNXT_FLTR_UPDATE 1 }; -#define BNXT_ALL_COPPER_ETHTOOL_SPEED \ - (ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full | \ - ADVERTISED_10000baseT_Full) - struct bnxt_link_info { u8 media_type; u8 transceiver; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index ac9d6e5bc585..c0998025cb11 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -696,16 +696,10 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return rc; if (cmd->autoneg == AUTONEG_ENABLE) { - if (link_info->media_type != PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) { - netdev_err(dev, "Media type doesn't support autoneg\n"); - rc = -EINVAL; - goto set_setting_exit; - } - if (cmd->advertising & ~(BNXT_ALL_COPPER_ETHTOOL_SPEED | - ADVERTISED_Autoneg | - ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Asym_Pause)) { + u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info); + + if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg | + ADVERTISED_TP | ADVERTISED_FIBRE)) { netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n", cmd->advertising); rc = -EINVAL; -- cgit v1.2.3 From 3bdf56c47dfcd819ab1e73644c2eb9c72c08f29e Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:45 -0500 Subject: bnxt_en: Add port statistics support. Gather periodic port statistics if the device is PF and link is up. This is triggered in bnxt_timer() every one second to request firmware to DMA the counters. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 50 +++++++++++++++++++++++++++++++ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 11 ++++++- 2 files changed, 60 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b740489b6a5c..ae5f08ea27d1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -2362,6 +2362,14 @@ static void bnxt_free_stats(struct bnxt *bp) u32 size, i; struct pci_dev *pdev = bp->pdev; + if (bp->hw_rx_port_stats) { + dma_free_coherent(&pdev->dev, bp->hw_port_stats_size, + bp->hw_rx_port_stats, + bp->hw_rx_port_stats_map); + bp->hw_rx_port_stats = NULL; + bp->flags &= ~BNXT_FLAG_PORT_STATS; + } + if (!bp->bnapi) return; @@ -2398,6 +2406,24 @@ static int bnxt_alloc_stats(struct bnxt *bp) cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } + + if (BNXT_PF(bp)) { + bp->hw_port_stats_size = sizeof(struct rx_port_stats) + + sizeof(struct tx_port_stats) + 1024; + + bp->hw_rx_port_stats = + dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size, + &bp->hw_rx_port_stats_map, + GFP_KERNEL); + if (!bp->hw_rx_port_stats) + return -ENOMEM; + + bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) + + 512; + bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map + + sizeof(struct rx_port_stats) + 512; + bp->flags |= BNXT_FLAG_PORT_STATS; + } return 0; } @@ -3834,6 +3860,23 @@ hwrm_ver_get_exit: return rc; } +static int bnxt_hwrm_port_qstats(struct bnxt *bp) +{ + int rc; + struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_port_qstats_input req = {0}; + + if (!(bp->flags & BNXT_FLAG_PORT_STATS)) + return 0; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); + req.port_id = cpu_to_le16(pf->port_id); + req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map); + req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return rc; +} + static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp) { if (bp->vxlan_port_cnt) { @@ -5232,6 +5275,10 @@ static void bnxt_timer(unsigned long data) if (atomic_read(&bp->intr_sem) != 0) goto bnxt_restart_timer; + if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) { + set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); + schedule_work(&bp->sp_task); + } bnxt_restart_timer: mod_timer(&bp->timer, jiffies + bp->current_interval); } @@ -5283,6 +5330,9 @@ static void bnxt_sp_task(struct work_struct *work) rtnl_unlock(); } + if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) + bnxt_hwrm_port_qstats(bp); + smp_mb__before_atomic(); clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index c4424b60d859..ec04c47172b7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -873,6 +873,7 @@ struct bnxt { #define BNXT_FLAG_MSIX_CAP 0x80 #define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_SHARED_RINGS 0x200 + #define BNXT_FLAG_PORT_STATS 0x400 #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ BNXT_FLAG_RFS | \ @@ -925,7 +926,7 @@ struct bnxt { struct bnxt_queue_info q_info[BNXT_MAX_QUEUE]; unsigned int current_interval; -#define BNXT_TIMER_INTERVAL (HZ / 2) +#define BNXT_TIMER_INTERVAL HZ struct timer_list timer; @@ -945,6 +946,13 @@ struct bnxt { void *hwrm_dbg_resp_addr; dma_addr_t hwrm_dbg_resp_dma_addr; #define HWRM_DBG_REG_BUF_SIZE 128 + + struct 
rx_port_stats *hw_rx_port_stats; + struct tx_port_stats *hw_tx_port_stats; + dma_addr_t hw_rx_port_stats_map; + dma_addr_t hw_tx_port_stats_map; + int hw_port_stats_size; + int hwrm_cmd_timeout; struct mutex hwrm_cmd_lock; /* serialize hwrm messages */ struct hwrm_ver_get_output ver_resp; @@ -980,6 +988,7 @@ struct bnxt { #define BNXT_RESET_TASK_SP_EVENT 6 #define BNXT_RST_RING_SP_EVENT 7 #define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8 +#define BNXT_PERIODIC_STATS_SP_EVENT 9 struct bnxt_pf_info pf; #ifdef CONFIG_BNXT_SRIOV -- cgit v1.2.3 From 9947f83fb79ca501f5ab24c370211bfb78b6b364 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:46 -0500 Subject: bnxt_en: Include some hardware port statistics in ndo_get_stats64(). Include some of the port error counters (e.g. crc) in ->ndo_get_stats64() for the PF device. Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ae5f08ea27d1..1cd00a0d04dd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -4935,6 +4935,22 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + struct rx_port_stats *rx = bp->hw_rx_port_stats; + struct tx_port_stats *tx = bp->hw_tx_port_stats; + + stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames); + stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames); + stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) + + le64_to_cpu(rx->rx_ovrsz_frames) + + le64_to_cpu(rx->rx_runt_frames); + stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) + + le64_to_cpu(rx->rx_jbr_frames); + stats->collisions = le64_to_cpu(tx->tx_total_collisions); + stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); + stats->tx_errors = le64_to_cpu(tx->tx_err); + } + return stats; } -- cgit v1.2.3 From 8ddc9aaa725a9337fc7bbe95fe1d1499769fb9b2 Mon Sep 17 00:00:00 2001 From: Michael Chan Date: Mon, 7 Mar 2016 15:38:47 -0500 Subject: bnxt_en: Include hardware port statistics in ethtool -S. Include the more useful port statistics in ethtool -S for the PF device. Signed-off-by: Michael Chan Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 105 +++++++++++++++++++++- 1 file changed, 103 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index c0998025cb11..9ada1662b651 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -83,13 +84,99 @@ static int bnxt_set_coalesce(struct net_device *dev, #define BNXT_NUM_STATS 21 +#define BNXT_RX_STATS_OFFSET(counter) \ + (offsetof(struct rx_port_stats, counter) / 8) + +#define BNXT_RX_STATS_ENTRY(counter) \ + { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) } + +#define BNXT_TX_STATS_OFFSET(counter) \ + ((offsetof(struct tx_port_stats, counter) + \ + sizeof(struct rx_port_stats) + 512) / 8) + +#define BNXT_TX_STATS_ENTRY(counter) \ + { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) } + +static const struct { + long offset; + char string[ETH_GSTRING_LEN]; +} bnxt_port_stats_arr[] = { + BNXT_RX_STATS_ENTRY(rx_64b_frames), + BNXT_RX_STATS_ENTRY(rx_65b_127b_frames), + BNXT_RX_STATS_ENTRY(rx_128b_255b_frames), + BNXT_RX_STATS_ENTRY(rx_256b_511b_frames), + BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames), + BNXT_RX_STATS_ENTRY(rx_1024b_1518_frames), + BNXT_RX_STATS_ENTRY(rx_good_vlan_frames), + BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames), + BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames), + BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames), + BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames), + BNXT_RX_STATS_ENTRY(rx_total_frames), + BNXT_RX_STATS_ENTRY(rx_ucast_frames), + BNXT_RX_STATS_ENTRY(rx_mcast_frames), + BNXT_RX_STATS_ENTRY(rx_bcast_frames), + BNXT_RX_STATS_ENTRY(rx_fcs_err_frames), + BNXT_RX_STATS_ENTRY(rx_ctrl_frames), + BNXT_RX_STATS_ENTRY(rx_pause_frames), + BNXT_RX_STATS_ENTRY(rx_pfc_frames), + BNXT_RX_STATS_ENTRY(rx_align_err_frames), + BNXT_RX_STATS_ENTRY(rx_ovrsz_frames), + BNXT_RX_STATS_ENTRY(rx_jbr_frames), + BNXT_RX_STATS_ENTRY(rx_mtu_err_frames), + BNXT_RX_STATS_ENTRY(rx_tagged_frames), + BNXT_RX_STATS_ENTRY(rx_double_tagged_frames), + BNXT_RX_STATS_ENTRY(rx_good_frames), + BNXT_RX_STATS_ENTRY(rx_undrsz_frames), + BNXT_RX_STATS_ENTRY(rx_eee_lpi_events), + BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration), + BNXT_RX_STATS_ENTRY(rx_bytes), + BNXT_RX_STATS_ENTRY(rx_runt_bytes), + BNXT_RX_STATS_ENTRY(rx_runt_frames), + + BNXT_TX_STATS_ENTRY(tx_64b_frames), + BNXT_TX_STATS_ENTRY(tx_65b_127b_frames), + BNXT_TX_STATS_ENTRY(tx_128b_255b_frames), + BNXT_TX_STATS_ENTRY(tx_256b_511b_frames), + BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames), + BNXT_TX_STATS_ENTRY(tx_1024b_1518_frames), + BNXT_TX_STATS_ENTRY(tx_good_vlan_frames), + BNXT_TX_STATS_ENTRY(tx_1519b_2047_frames), + BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames), + BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames), + BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames), + BNXT_TX_STATS_ENTRY(tx_good_frames), + BNXT_TX_STATS_ENTRY(tx_total_frames), + BNXT_TX_STATS_ENTRY(tx_ucast_frames), + BNXT_TX_STATS_ENTRY(tx_mcast_frames), + BNXT_TX_STATS_ENTRY(tx_bcast_frames), + BNXT_TX_STATS_ENTRY(tx_pause_frames), + BNXT_TX_STATS_ENTRY(tx_pfc_frames), + BNXT_TX_STATS_ENTRY(tx_jabber_frames), + BNXT_TX_STATS_ENTRY(tx_fcs_err_frames), + BNXT_TX_STATS_ENTRY(tx_err), + BNXT_TX_STATS_ENTRY(tx_fifo_underruns), + BNXT_TX_STATS_ENTRY(tx_eee_lpi_events), + BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration), + BNXT_TX_STATS_ENTRY(tx_total_collisions), + BNXT_TX_STATS_ENTRY(tx_bytes), +}; + +#define 
BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) + static int bnxt_get_sset_count(struct net_device *dev, int sset) { struct bnxt *bp = netdev_priv(dev); switch (sset) { - case ETH_SS_STATS: - return BNXT_NUM_STATS * bp->cp_nr_rings; + case ETH_SS_STATS: { + int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings; + + if (bp->flags & BNXT_FLAG_PORT_STATS) + num_stats += BNXT_NUM_PORT_STATS; + + return num_stats; + } default: return -EOPNOTSUPP; } @@ -118,6 +205,14 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = le64_to_cpu(hw_stats[k]); buf[j++] = cpr->rx_l4_csum_errors; } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; + + for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) { + buf[j] = le64_to_cpu(*(port_stats + + bnxt_port_stats_arr[i].offset)); + } + } } static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) @@ -172,6 +267,12 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) sprintf(buf, "[%d]: rx_l4_csum_errors", i); buf += ETH_GSTRING_LEN; } + if (bp->flags & BNXT_FLAG_PORT_STATS) { + for (i = 0; i < BNXT_NUM_PORT_STATS; i++) { + strcpy(buf, bnxt_port_stats_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } break; default: netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n", -- cgit v1.2.3 From 6316ea6db93d875df333e7ab205bf1aa3b3616d7 Mon Sep 17 00:00:00 2001 From: Satish Baddipadige Date: Mon, 7 Mar 2016 15:38:48 -0500 Subject: bnxt_en: Enable AER support. Add pci_error_handler callbacks to support for pcie advanced error recovery. Signed-off-by: Satish Baddipadige Signed-off-by: Michael Chan Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 109 ++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 1cd00a0d04dd..aabbd51db981 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -5412,6 +5412,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev) goto init_err_release; } + pci_enable_pcie_error_reporting(pdev); + INIT_WORK(&bp->sp_task, bnxt_sp_task); spin_lock_init(&bp->ntp_fltr_lock); @@ -5791,6 +5793,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) bnxt_sriov_disable(bp); + pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); cancel_work_sync(&bp->sp_task); bp->sp_event = 0; @@ -6030,11 +6033,117 @@ init_err_free: return rc; } +/** + * bnxt_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + netdev_info(netdev, "PCI I/O error detected\n"); + + rtnl_lock(); + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) { + rtnl_unlock(); + return PCI_ERS_RESULT_DISCONNECT; + } + + if (netif_running(netdev)) + bnxt_close(netdev); + + pci_disable_device(pdev); + rtnl_unlock(); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * bnxt_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. 
+ * At this point, the card has exprienced a hard reset, + * followed by fixups by BIOS, and has its config space + * set up identically to what it was at cold boot. + */ +static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct bnxt *bp = netdev_priv(netdev); + int err = 0; + pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; + + netdev_info(bp->dev, "PCI Slot Reset\n"); + + rtnl_lock(); + + if (pci_enable_device(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + } else { + pci_set_master(pdev); + + if (netif_running(netdev)) + err = bnxt_open(netdev); + + if (!err) + result = PCI_ERS_RESULT_RECOVERED; + } + + if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) + dev_close(netdev); + + rtnl_unlock(); + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n", + err); /* non-fatal, continue */ + } + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * bnxt_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells + * us that its OK to resume normal operation. + */ +static void bnxt_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + + rtnl_lock(); + + netif_device_attach(netdev); + + rtnl_unlock(); +} + +static const struct pci_error_handlers bnxt_err_handler = { + .error_detected = bnxt_io_error_detected, + .slot_reset = bnxt_io_slot_reset, + .resume = bnxt_io_resume +}; + static struct pci_driver bnxt_pci_driver = { .name = DRV_MODULE_NAME, .id_table = bnxt_pci_tbl, .probe = bnxt_init_one, .remove = bnxt_remove_one, + .err_handler = &bnxt_err_handler, #if defined(CONFIG_BNXT_SRIOV) .sriov_configure = bnxt_sriov_configure, #endif -- cgit v1.2.3 From 00f481bd895a826058d301b3093e86e819497b51 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Wed, 24 Feb 2016 12:19:22 +0200 Subject: iwlwifi: mvm: add ctdp operations to debugfs Add debugfs entries to get the ctdp budget average and to stop ctdp. 
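For illustration only, the new entries could be exercised from user space along these lines; the debugfs path and PCI address below assume the usual iwlwifi mvm debugfs layout and are not taken from this patch:

	/* hypothetical user-space reader for the new ctdp_budget entry */
	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		/* path is an assumption: <debugfs>/iwlwifi/<pci-addr>/iwlmvm/ctdp_budget */
		FILE *f = fopen("/sys/kernel/debug/iwlwifi/0000:01:00.0/iwlmvm/ctdp_budget", "r");

		if (!f) {
			perror("ctdp_budget");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("average cTDP budget: %s", buf);
		fclose(f);
		return 0;
	}

Any write to the companion stop_ctdp entry issues CTDP_CMD_OPERATION_STOP, as the write handler in the diff below shows.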
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 42 +++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 78 +++++++++++++----------- 2 files changed, 86 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 56e6b0b8b9cc..a43b3921c4c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -73,6 +73,44 @@ #include "debugfs.h" #include "iwl-fw-error-dump.h" +static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm *mvm = file->private_data; + char buf[16]; + int pos, budget; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0); + mutex_unlock(&mvm->mutex); + + if (budget < 0) + return budget; + + pos = scnprintf(buf, sizeof(buf), "%d\n", budget); + + return simple_read_from_buffer(user_buf, count, ppos, buf, pos); +} + +static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf, + size_t count, loff_t *ppos) +{ + int ret; + + if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR) + return -EIO; + + mutex_lock(&mvm->mutex); + ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0); + mutex_unlock(&mvm->mutex); + + return ret ?: count; +} + static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, size_t count, loff_t *ppos) { @@ -1493,6 +1531,8 @@ iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf, MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64); /* Device wide debugfs entries */ +MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget); +MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); @@ -1542,6 +1582,8 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, S_IWUSR | S_IRUSR); MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, S_IRUSR); + MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, S_IWUSR); MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR); MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 0a02e9835d6b..ce0c6fd183bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -510,6 +510,50 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = { .support_tx_backoff = true, }; +int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget) +{ + struct iwl_mvm_ctdp_cmd cmd = { + .operation = cpu_to_le32(op), + .budget = cpu_to_le32(budget), + .window_size = 0, + }; + int ret; + u32 status; + + lockdep_assert_held(&mvm->mutex); + + ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, + CTDP_CONFIG_CMD), + sizeof(cmd), &cmd, &status); + + if (ret) { + IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret); + return ret; + } + + switch (op) { + case CTDP_CMD_OPERATION_START: +#ifdef CONFIG_THERMAL + mvm->cooling_dev.cur_state = budget; +#endif /* CONFIG_THERMAL */ + break; + case CTDP_CMD_OPERATION_REPORT: + IWL_DEBUG_TEMP(mvm, "cTDP 
avg energy in mWatt = %d\n", status); + /* when the function is called with CTDP_CMD_OPERATION_REPORT + * option the function should return the average budget value + * that is received from the FW. + * The budget can't be less or equal to 0, so it's possible + * to distinguish between error values and budgets. + */ + return status; + case CTDP_CMD_OPERATION_STOP: + IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n"); + break; + } + + return 0; +} + #ifdef CONFIG_THERMAL static int compare_temps(const void *a, const void *b) { @@ -738,40 +782,6 @@ static const u32 iwl_mvm_cdev_budgets[] = { 150, /* cooling state 19 */ }; -int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget) -{ - struct iwl_mvm_ctdp_cmd cmd = { - .operation = cpu_to_le32(op), - .budget = cpu_to_le32(budget), - .window_size = 0, - }; - int ret; - u32 status; - - lockdep_assert_held(&mvm->mutex); - - ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, - CTDP_CONFIG_CMD), - sizeof(cmd), &cmd, &status); - - if (ret) { - IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret); - return ret; - } - - /* can happen if the registration failed */ - if (!mvm->cooling_dev.cdev) - return -EINVAL; - - if (op == CTDP_CMD_OPERATION_START) - mvm->cooling_dev.cur_state = budget; - - else if (op == CTDP_CMD_OPERATION_REPORT) - IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status); - - return 0; -} - static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { -- cgit v1.2.3 From 62d23403d4a201db117975d46c1889c79987069e Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Sun, 6 Mar 2016 09:51:29 +0200 Subject: iwlwifi: mvm: turn off AMSDU bit in QoS control for de-aggregated AMSDUs Our hardware de-aggregates AMSDUs but copies the mac header as it to the de-aggregated MPDUs. We need to turn off the AMSDU bit in the QoS control ourselves. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index cd6ca374e5d3..9a54f2d2a66b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -519,6 +519,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rcu_read_unlock(); return; } + + /* + * Our hardware de-aggregates AMSDUs but copies the mac header + * as it to the de-aggregated MPDUs. We need to turn off the + * AMSDU bit in the QoS control ourselves. + */ + if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) && + !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) { + u8 *qc = ieee80211_get_qos_ctl(hdr); + + *qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } } /* -- cgit v1.2.3 From 5151ad953c68f2af0b50cf8d074a902ccf001ae0 Mon Sep 17 00:00:00 2001 From: Matti Gottlieb Date: Wed, 9 Mar 2016 14:46:28 +0200 Subject: iwlwifi: mvm: ROC: cleanup time event info on FW failure Currently when the FW sends start/stop aux roc time event notification with an error status, the driver returns an error value, but does not remove the time event, and does not notify the stack above that the time event is over. This causes problems that the stack above assumes we are still in the middle of a time event, and therefore can block different events, such as scanning. On FW failure notification, cleanup the time event parameters and notify the stack above that the time event is over. 
Signed-off-by: Matti Gottlieb Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 924dd6a41626..2c12789e7550 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -371,20 +371,13 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm, iwl_mvm_te_check_trigger(mvm, notif, te_data); - if (!le32_to_cpu(notif->status)) { - IWL_DEBUG_TE(mvm, - "ERROR: Aux ROC Time Event %s notification failure\n", - (le32_to_cpu(notif->action) & - TE_V2_NOTIF_HOST_EVENT_START) ? "start" : "end"); - return -EINVAL; - } - IWL_DEBUG_TE(mvm, - "Aux ROC time event notification - UID = 0x%x action %d\n", + "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n", le32_to_cpu(notif->unique_id), - le32_to_cpu(notif->action)); + le32_to_cpu(notif->action), le32_to_cpu(notif->status)); - if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { + if (!le32_to_cpu(notif->status) || + le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) { /* End TE, notify mac80211 */ ieee80211_remain_on_channel_expired(mvm->hw); iwl_mvm_roc_finished(mvm); /* flush aux queue */ -- cgit v1.2.3 From c9cb14a64c32f890d5a0649cb7d81dbfece33056 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Thu, 3 Mar 2016 15:35:34 +0200 Subject: iwlwifi: mvm: add support for async rx handler without hold the mutex When running async rx handler the framework holds the mvm->mutex before starting the async handler, that might cause a deadlock in case the handler calls to ops that lock the mutex as well. Add support for running async rx handler without hold the mutex before activating the handler. Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 114 +++++++++++++++++---------- 1 file changed, 73 insertions(+), 41 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 4322e8ed1159..5e8ab796d5bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -205,79 +205,107 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode) ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS); } +/** + * enum iwl_rx_handler_context context for Rx handler + * @RX_HANDLER_SYNC : this means that it will be called in the Rx path + * which can't acquire mvm->mutex. + * @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex + * (and only in this case!), it should be set as ASYNC. In that case, + * it will be called from a worker with mvm->mutex held. + * @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the + * mutex itself, it will be called from a worker without mvm->mutex held. 
+ */ +enum iwl_rx_handler_context { + RX_HANDLER_SYNC, + RX_HANDLER_ASYNC_LOCKED, + RX_HANDLER_ASYNC_UNLOCKED, +}; + +/** + * struct iwl_rx_handlers handler for FW notification + * @cmd_id: command id + * @context: see &iwl_rx_handler_context + * @fn: the function is called when notification is received + */ struct iwl_rx_handlers { u16 cmd_id; - bool async; + enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; -#define RX_HANDLER(_cmd_id, _fn, _async) \ - { .cmd_id = _cmd_id , .fn = _fn , .async = _async } -#define RX_HANDLER_GRP(_grp, _cmd, _fn, _async) \ - { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .async = _async } +#define RX_HANDLER(_cmd_id, _fn, _context) \ + { .cmd_id = _cmd_id, .fn = _fn, .context = _context } +#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context) \ + { .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context } /* * Handlers for fw notifications * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME * This list should be in order of frequency for performance purposes. * - * The handler can be SYNC - this means that it will be called in the Rx path - * which can't acquire mvm->mutex. If the handler needs to hold mvm->mutex (and - * only in this case!), it should be set as ASYNC. In that case, it will be - * called from a worker with mvm->mutex held. + * The handler can be one from three contexts, see &iwl_rx_handler_context */ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { - RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, false), - RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, false), - - RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true), - RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, true), - RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true), + RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC), + RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC), + + RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION, - iwl_mvm_rx_ant_coupling_notif, true), + iwl_mvm_rx_ant_coupling_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID, - iwl_mvm_window_status_notif, false), + iwl_mvm_window_status_notif, RX_HANDLER_SYNC), - RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false), - RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, true), + RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, + RX_HANDLER_SYNC), + RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc, + RX_HANDLER_ASYNC_LOCKED), - RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false), + RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC), RX_HANDLER(SCAN_ITERATION_COMPLETE, - iwl_mvm_rx_lmac_scan_iter_complete_notif, false), + iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC), RX_HANDLER(SCAN_OFFLOAD_COMPLETE, - iwl_mvm_rx_lmac_scan_complete_notif, true), + iwl_mvm_rx_lmac_scan_complete_notif, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found, - false), + RX_HANDLER_SYNC), RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif, - true), + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC, - iwl_mvm_rx_umac_scan_iter_complete_notif, false), + iwl_mvm_rx_umac_scan_iter_complete_notif, 
RX_HANDLER_SYNC), - RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false), + RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, + RX_HANDLER_SYNC), RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif, - false), + RX_HANDLER_SYNC), - RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, false), + RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC), RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION, - iwl_mvm_power_uapsd_misbehaving_ap_notif, false), - RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, true), + iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC), + RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE, - iwl_mvm_temp_notif, true), + iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION, - iwl_mvm_ct_kill_notif, false), + iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC), RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif, - true), - RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false), - RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true), + RX_HANDLER_ASYNC_LOCKED), + RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, + RX_HANDLER_SYNC), + RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, + RX_HANDLER_ASYNC_LOCKED), RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF, - iwl_mvm_rx_stored_beacon_notif, false), + iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC), RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF, - iwl_mvm_mu_mimo_grp_notif, false), - + iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC), }; #undef RX_HANDLER #undef RX_HANDLER_GRP @@ -743,6 +771,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) struct iwl_async_handler_entry { struct list_head list; struct iwl_rx_cmd_buffer rxb; + enum iwl_rx_handler_context context; void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb); }; @@ -769,7 +798,6 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) INIT_LIST_HEAD(&local_list); /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */ - mutex_lock(&mvm->mutex); /* * Sync with Rx path with a lock. 
Remove all the entries from this list, @@ -780,12 +808,15 @@ static void iwl_mvm_async_handlers_wk(struct work_struct *wk) spin_unlock_bh(&mvm->async_handlers_lock); list_for_each_entry_safe(entry, tmp, &local_list, list) { + if (entry->context == RX_HANDLER_ASYNC_LOCKED) + mutex_lock(&mvm->mutex); entry->fn(mvm, &entry->rxb); iwl_free_rxb(&entry->rxb); list_del(&entry->list); + if (entry->context == RX_HANDLER_ASYNC_LOCKED) + mutex_unlock(&mvm->mutex); kfree(entry); } - mutex_unlock(&mvm->mutex); } static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm, @@ -842,7 +873,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)) continue; - if (!rx_h->async) { + if (rx_h->context == RX_HANDLER_SYNC) { rx_h->fn(mvm, rxb); return; } @@ -856,6 +887,7 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, entry->rxb._offset = rxb->_offset; entry->rxb._rx_page_order = rxb->_rx_page_order; entry->fn = rx_h->fn; + entry->context = rx_h->context; spin_lock(&mvm->async_handlers_lock); list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); -- cgit v1.2.3 From 7b5424361ec9270f40c3e23cb747cc8b9ee66235 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 1 Feb 2016 13:46:06 +0200 Subject: iwlwifi: pcie: fine tune number of rxbs We kick the allocator when we have 2 RBDs that don't have attached RBs, and the allocator allocates 8 RBs meaning that it needs another 6 RBDs to attach the RBs to. The design is that allocator should always have enough RBDs to fulfill requests, so we give in advance 6 RBDs to the allocator so that when it is kicked, it gets additional 2 RBDs and has enough RBDs. These RBDs were taken from the Rx queue itself, meaning that each Rx queue didn't have the maximal number of RBDs, but MAX - 6. Change initial number of RBDs in the system to include both queue size and allocator reserves. Note the multi-queue is always 511 instead of 512 to avoid a full queue since we cannot detect this state easily enough in the 9000 arch. 
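To make the arithmetic concrete, here is a minimal standalone sketch of the new pool sizing; RX_CLAIM_REQ_ALLOC (8) and RX_POST_REQ_ALLOC (2) follow the numbers quoted in this message, and the IWL_MAX_RX_HW_QUEUES value is purely illustrative rather than taken from the driver headers:

	#include <stdio.h>

	#define MQ_RX_TABLE_SIZE	512
	#define MQ_RX_NUM_RBDS		(MQ_RX_TABLE_SIZE - 1)	/* 511: avoid a full queue */
	#define RX_CLAIM_REQ_ALLOC	8	/* RBs handed back per allocator run */
	#define RX_POST_REQ_ALLOC	2	/* empty RBDs that kick the allocator */
	#define IWL_MAX_RX_HW_QUEUES	16	/* illustrative value only */

	int main(void)
	{
		/* reserve enough RBDs so the allocator can always complete a request */
		int reserve = IWL_MAX_RX_HW_QUEUES *
			      (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
		int pool = MQ_RX_NUM_RBDS + reserve;

		printf("queue RBDs %d + allocator reserve %d = pool %d\n",
		       MQ_RX_NUM_RBDS, reserve, pool);
		return 0;
	}

This mirrors the RX_POOL_SIZE definition added to iwl-fh.h in the diff below.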
Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 9 ++++++--- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2 +- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 11 ++++++----- 3 files changed, 13 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 8af818b10e71..582008a66069 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -511,9 +511,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) */ #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002) -#define MQ_RX_TABLE_SIZE 512 -#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1) -#define MQ_RX_POOL_SIZE MQ_RX_TABLE_MASK +#define MQ_RX_TABLE_SIZE 512 +#define MQ_RX_TABLE_MASK (MQ_RX_TABLE_SIZE - 1) +#define MQ_RX_NUM_RBDS (MQ_RX_TABLE_SIZE - 1) +#define RX_POOL_SIZE (MQ_RX_NUM_RBDS + \ + IWL_MAX_RX_HW_QUEUES * \ + (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC)) #define RX_QUEUE_SIZE 256 #define RX_QUEUE_MASK 255 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 6677f3122226..dadafbdef9d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -347,7 +347,7 @@ struct iwl_tso_hdr_page { */ struct iwl_trans_pcie { struct iwl_rxq *rxq; - struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE]; + struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE]; struct iwl_rb_allocator rba; struct iwl_trans *trans; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 509f79003241..98524a006f7a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -434,7 +434,7 @@ static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; - for (i = 0; i < MQ_RX_POOL_SIZE; i++) { + for (i = 0; i < RX_POOL_SIZE; i++) { if (!trans_pcie->rx_pool[i].page) continue; dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma, @@ -835,7 +835,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *def_rxq; struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i, err, num_rbds, allocator_pool_size; + int i, err, queue_size, allocator_pool_size, num_alloc; if (!trans_pcie->rxq) { err = iwl_pcie_rx_alloc(trans); @@ -887,11 +887,12 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) } /* move the pool to the default queue and allocator ownerships */ - num_rbds = trans->cfg->mq_rx_supported ? - MQ_RX_POOL_SIZE : RX_QUEUE_SIZE; + queue_size = trans->cfg->mq_rx_supported ? + MQ_RX_NUM_RBDS : RX_QUEUE_SIZE; allocator_pool_size = trans->num_rx_queues * (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC); - for (i = 0; i < num_rbds; i++) { + num_alloc = queue_size + allocator_pool_size; + for (i = 0; i < num_alloc; i++) { struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i]; if (i < allocator_pool_size) -- cgit v1.2.3 From 17c867bfe89bd2e089752ac938468900387acbe2 Mon Sep 17 00:00:00 2001 From: Sara Sharon Date: Mon, 7 Mar 2016 14:18:29 +0200 Subject: iwlwifi: add support for getting HW address from CSR From 9000 family on, we need to get HW address from host CSR registers. 
OEM can override it by fusing the override registers - read those first, and if those are 0 - read the OTP registers instead. In addition - bail out if no valid mac address is present. Make it shared for all NICs. Signed-off-by: Sara Sharon Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 3 +- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 2 + drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 10 +++ drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 75 ++++++++++++++++------ 4 files changed, 68 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c index 8e32a57dda0f..318b1dc171f2 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c @@ -140,7 +140,8 @@ static const struct iwl_tt_params iwl9000_tt_params = { .thermal_params = &iwl9000_tt_params, \ .apmg_not_supported = true, \ .mq_rx_supported = true, \ - .vht_mu_mimo_supported = true + .vht_mu_mimo_supported = true, \ + .mac_addr_from_csr = true const struct iwl_cfg iwl9260_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9260", diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 4f2b57e8bbc7..3e4d346be350 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -297,6 +297,7 @@ struct iwl_pwr_tx_backoff { * @host_interrupt_operation_mode: device needs host interrupt operation * mode set * @nvm_hw_section_num: the ID of the HW NVM section + * @mac_addr_from_csr: read HW address from CSR registers * @features: hw features, any combination of feature_whitelist * @pwr_tx_backoffs: translation table between power limits and backoffs * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response @@ -345,6 +346,7 @@ struct iwl_cfg { const bool host_interrupt_operation_mode; bool high_temp; u8 nvm_hw_section_num; + bool mac_addr_from_csr; bool lp_xtal_workaround; const struct iwl_pwr_tx_backoff *pwr_tx_backoffs; bool no_power_up_nic_in_init; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index a79c4f61a851..b978f6cae55c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -598,4 +598,14 @@ enum msix_hw_int_causes { #define MSIX_AUTO_CLEAR_CAUSE 0 #define MSIX_NON_AUTO_CLEAR_CAUSE BIT(7) +/***************************************************************************** + * HW address related registers * + *****************************************************************************/ + +#define CSR_ADDR_BASE (0x380) +#define CSR_MAC_ADDR0_OTP (CSR_ADDR_BASE) +#define CSR_MAC_ADDR1_OTP (CSR_ADDR_BASE + 4) +#define CSR_MAC_ADDR0_STRAP (CSR_ADDR_BASE + 8) +#define CSR_MAC_ADDR1_STRAP (CSR_ADDR_BASE + 0xC) + #endif /* !__iwl_csr_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 5e6b90da3179..93a689583dff 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -71,6 +71,8 @@ #include "iwl-modparams.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" +#include "iwl-io.h" +#include "iwl-csr.h" /* NVM offsets (in words) definitions */ enum wkp_nvm_offsets { @@ -524,6 +526,36 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, data->valid_rx_ant = 
NVM_RF_CFG_RX_ANT_MSK_FAMILY_8000(radio_cfg); } +static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest) +{ + const u8 *hw_addr; + + hw_addr = (const u8 *)&mac_addr0; + dest[0] = hw_addr[3]; + dest[1] = hw_addr[2]; + dest[2] = hw_addr[1]; + dest[3] = hw_addr[0]; + + hw_addr = (const u8 *)&mac_addr1; + dest[4] = hw_addr[1]; + dest[5] = hw_addr[0]; +} + +static void iwl_set_hw_address_from_csr(struct iwl_trans *trans, + struct iwl_nvm_data *data) +{ + __le32 mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_STRAP)); + __le32 mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_STRAP)); + + /* If OEM did not fuse address - get it from OTP */ + if (!mac_addr0 && !mac_addr1) { + mac_addr0 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR0_OTP)); + mac_addr1 = cpu_to_le32(iwl_read32(trans, CSR_MAC_ADDR1_OTP)); + } + + iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); +} + static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, @@ -564,21 +596,8 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, WFMP_MAC_ADDR_0)); __le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans, WFMP_MAC_ADDR_1)); - /* read the MAC address from HW resisters */ - hw_addr = (const u8 *)&mac_addr0; - data->hw_addr[0] = hw_addr[3]; - data->hw_addr[1] = hw_addr[2]; - data->hw_addr[2] = hw_addr[1]; - data->hw_addr[3] = hw_addr[0]; - - hw_addr = (const u8 *)&mac_addr1; - data->hw_addr[4] = hw_addr[1]; - data->hw_addr[5] = hw_addr[0]; - - if (!is_valid_ether_addr(data->hw_addr)) - IWL_ERR(trans, - "mac address (%pM) from hw section is not valid\n", - data->hw_addr); + + iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr); return; } @@ -586,12 +605,14 @@ static void iwl_set_hw_address_family_8000(struct iwl_trans *trans, IWL_ERR(trans, "mac address is not found\n"); } -static void iwl_set_hw_address(struct iwl_trans *trans, - const struct iwl_cfg *cfg, - struct iwl_nvm_data *data, const __le16 *nvm_hw, - const __le16 *mac_override) +static int iwl_set_hw_address(struct iwl_trans *trans, + const struct iwl_cfg *cfg, + struct iwl_nvm_data *data, const __le16 *nvm_hw, + const __le16 *mac_override) { - if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { + if (cfg->mac_addr_from_csr) { + iwl_set_hw_address_from_csr(trans, data); + } else if (cfg->device_family != IWL_DEVICE_FAMILY_8000) { const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); /* The byte order is little endian 16 bit, meaning 214365 */ @@ -605,6 +626,13 @@ static void iwl_set_hw_address(struct iwl_trans *trans, iwl_set_hw_address_family_8000(trans, cfg, data, mac_override, nvm_hw); } + + if (!is_valid_ether_addr(data->hw_addr)) { + IWL_ERR(trans, "no valid mac address was found\n"); + return -EINVAL; + } + + return 0; } struct iwl_nvm_data * @@ -680,7 +708,12 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, ch_section = regulatory; } - iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override); + /* If no valid mac address was found - bail out */ + if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) { + kfree(data); + return NULL; + } + iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains, lar_fw_supported && lar_enabled); data->calib_version = 255; -- cgit v1.2.3 From 532beba378d26d5bd9bbb1b485e969c13bf72009 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 7 Mar 2016 22:23:52 +0200 Subject: iwlwifi: mvm: don't let NDPs mess the packet tracking We need to track the next packet that we will 
reclaim in order to know when the Tx queues are empty. This is useful when we open or tear down an A-MPDU session which requires switching queues. The next packet being reclaimed is identified by its WiFi sequence number and this is relevant only when we use QoS. QoS NDPs do have a TID but have a meaningless sequence number. The spec mandates that the receiver ignore the sequence number in this case, allowing the transmitter to put any sequence number. Our implementation leaves it 0. When we reclaim a QoS NDP, we can't update the next_reclaim counter since the sequence number of the QoS NDP itself is invalid. We used to update next_reclaim based on the sequence number of the QoS NDP, which reset it to 1 (0 + 1), and because of this we never knew when the queue got empty. This had the sad consequence of leaving the A-MPDU state machine stuck in a transient state. To fix this, don't update next_reclaim when we reclaim a QoS NDP. Alesya saw this bug when testing u-APSD. Because the A-MPDU state machine was stuck in EMPTYING_DELBA, we updated mac80211 that we still have frames for that station when it got back to sleep. mac80211 then wrongly set the TIM bit in the beacon and requested to release non-existent frames from the A-MPDU queue. This led to a situation where the client was trying to poll frames but we had no frames to send. Reported-by: Alesya Shapira Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 271e8da6d140..75870e68a7c3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -963,6 +964,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, struct sk_buff_head skbs; u8 skb_freed = 0; u16 next_reclaimed, seq_ctl; + bool is_ndp = false; __skb_queue_head_init(&skbs); @@ -1016,6 +1018,20 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, seq_ctl = le16_to_cpu(hdr->seq_ctrl); } + if (unlikely(!seq_ctl)) { + struct ieee80211_hdr *hdr = (void *)skb->data; + + /* + * If it is an NDP, we can't update next_reclaim since + * its sequence control is 0. Note that for that same + * reason, NDPs are never sent to A-MPDU'able queues + * so that we can never have more than one freed frame + * for a single Tx resonse (see WARN_ON below). + */ + if (ieee80211_is_qos_nullfunc(hdr->frame_control)) + is_ndp = true; + } + /* * TODO: this is not accurate if we are freeing more than one * packet.
@@ -1079,9 +1095,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, bool send_eosp_ndp = false; spin_lock_bh(&mvmsta->lock); - tid_data->next_reclaimed = next_reclaimed; - IWL_DEBUG_TX_REPLY(mvm, "Next reclaimed packet:%d\n", - next_reclaimed); + if (!is_ndp) { + tid_data->next_reclaimed = next_reclaimed; + IWL_DEBUG_TX_REPLY(mvm, + "Next reclaimed packet:%d\n", + next_reclaimed); + } else { + IWL_DEBUG_TX_REPLY(mvm, + "NDP - don't update next_reclaimed\n"); + } + iwl_mvm_check_ratid_empty(mvm, sta, tid); if (mvmsta->sleep_tx_count) { -- cgit v1.2.3 From 416cb2467bba25a6729b95e1ada0254a9477a360 Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Tue, 8 Mar 2016 13:53:05 +0200 Subject: iwlwifi: mvm: remove RRM advertisement mac80211 advertises this feature for all its drivers. Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3590835a308f..39b9c383c272 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -610,8 +610,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; - wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_RRM); - mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; #ifdef CONFIG_PM_SLEEP -- cgit v1.2.3 From b358993b3f582f43a6098028b5b08d084dff4d24 Mon Sep 17 00:00:00 2001 From: Chaya Rachel Ivgi Date: Sun, 14 Feb 2016 14:03:10 +0200 Subject: iwlwifi: mvm: return the cooling state index instead of the budget iwl_mvm_tcool_get_cur_state is the function that returns the cooling state index to the sysfs handler. This function returns mvm->cooling_dev.cur_state but that variable was set to the budget and not the cooling state index. Fix that. Add a missing blank line while at it. 
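To make the fix concrete, here is a stripped-down, user-space model of the mapping: the budget table values are copied from the patch below, but the surrounding functions are illustrative stand-ins, not the driver's code.

#include <stdio.h>

/* Budgets in mW, indexed by cooling state (values copied from the patch). */
static const unsigned int cdev_budgets[] = {
    2000, 1800, 1600, 1400, 1200, 1000, 900, 800, 700, 650,
    600, 550, 500, 450, 400, 350, 300, 250, 200, 150,
};

static unsigned long cur_state;  /* what get_cur_state must report */

/* Illustrative stand-in for sending the CTDP start command. */
static int ctdp_start(unsigned long state)
{
    if (state >= sizeof(cdev_budgets) / sizeof(cdev_budgets[0]))
        return -1;
    printf("firmware gets budget %u mW\n", cdev_budgets[state]);
    cur_state = state;   /* store the index, not the budget */
    return 0;
}

int main(void)
{
    ctdp_start(6);
    printf("sysfs reads back cur_state %lu\n", cur_state);
    return 0;
}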
Signed-off-by: Chaya Rachel Ivgi Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 4 +- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 56 ++++++++++++++-------------- 2 files changed, 31 insertions(+), 29 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index b869db9afc52..9abbc93e3c06 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -543,8 +543,8 @@ struct iwl_mvm_thermal_device { }; /* - * iwl_mvm_cooling_device - * @cur_state: current state in milliwatts + * struct iwl_mvm_cooling_device + * @cur_state: current state * @cdev: struct thermal cooling device */ struct iwl_mvm_cooling_device { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index ce0c6fd183bb..f1f28255a3a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -510,11 +510,35 @@ static const struct iwl_tt_params iwl_mvm_default_tt_params = { .support_tx_backoff = true, }; -int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget) +/* budget in mWatt */ +static const u32 iwl_mvm_cdev_budgets[] = { + 2000, /* cooling state 0 */ + 1800, /* cooling state 1 */ + 1600, /* cooling state 2 */ + 1400, /* cooling state 3 */ + 1200, /* cooling state 4 */ + 1000, /* cooling state 5 */ + 900, /* cooling state 6 */ + 800, /* cooling state 7 */ + 700, /* cooling state 8 */ + 650, /* cooling state 9 */ + 600, /* cooling state 10 */ + 550, /* cooling state 11 */ + 500, /* cooling state 12 */ + 450, /* cooling state 13 */ + 400, /* cooling state 14 */ + 350, /* cooling state 15 */ + 300, /* cooling state 16 */ + 250, /* cooling state 17 */ + 200, /* cooling state 18 */ + 150, /* cooling state 19 */ +}; + +int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) { struct iwl_mvm_ctdp_cmd cmd = { .operation = cpu_to_le32(op), - .budget = cpu_to_le32(budget), + .budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]), .window_size = 0, }; int ret; @@ -534,7 +558,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget) switch (op) { case CTDP_CMD_OPERATION_START: #ifdef CONFIG_THERMAL - mvm->cooling_dev.cur_state = budget; + mvm->cooling_dev.cur_state = state; #endif /* CONFIG_THERMAL */ break; case CTDP_CMD_OPERATION_REPORT: @@ -759,29 +783,6 @@ static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm) mvm->tz_device.temp_trips[i] = S16_MIN; } -static const u32 iwl_mvm_cdev_budgets[] = { - 2000, /* cooling state 0 */ - 1800, /* cooling state 1 */ - 1600, /* cooling state 2 */ - 1400, /* cooling state 3 */ - 1200, /* cooling state 4 */ - 1000, /* cooling state 5 */ - 900, /* cooling state 6 */ - 800, /* cooling state 7 */ - 700, /* cooling state 8 */ - 650, /* cooling state 9 */ - 600, /* cooling state 10 */ - 550, /* cooling state 11 */ - 500, /* cooling state 12 */ - 450, /* cooling state 13 */ - 400, /* cooling state 14 */ - 350, /* cooling state 15 */ - 300, /* cooling state 16 */ - 250, /* cooling state 17 */ - 200, /* cooling state 18 */ - 150, /* cooling state 19 */ -}; - static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { @@ -799,6 +800,7 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev, return -EBUSY; *state = mvm->cooling_dev.cur_state; + return 0; } @@ -822,7 +824,7 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device 
*cdev, } ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, - iwl_mvm_cdev_budgets[new_state]); + new_state); unlock: mutex_unlock(&mvm->mutex); -- cgit v1.2.3 From e0e168dc8c4e41bab6ab94a838cfee50dd2b5f25 Mon Sep 17 00:00:00 2001 From: Gregory Greenman Date: Mon, 29 Feb 2016 15:34:25 +0200 Subject: iwlwifi: pcie: avoid restocks inside rx loop if not emergency When trying to reach high Rx throughput of more than 500Mbps on a device with a relatively weak CPU (Atom x5-Z8500), CPU utilization may become a bottleneck. Analysis showed that we are looping in iwl_pcie_rx_handle for very long periods which led to starvation of other threads (iwl_pcie_rx_handle runs with _bh disabled). We were handling Rx and allocating new buffers and the new buffers were ready quickly enough to be available before we had finished handling all the buffers available in the hardware. As a consequence, we called iwl_pcie_rxq_restock to refill the hardware with the new buffers, and start again handling new buffers without exiting the function. Since we read the hardware pointer again when we goto restart, new buffers were handled immediately instead of exiting the function. This patch avoids refilling RBs inside rx handling loop, unless an emergency situation is reached. It also doesn't read the hardware pointer again unless we are in an emergency (unlikely) case. This significantly reduce the maximal time we spend in iwl_pcie_rx_handle with _bh disabled. Signed-off-by: Gregory Greenman Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 59 ++++++++++++++++------------ 1 file changed, 33 insertions(+), 26 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 98524a006f7a..4be3c35afd19 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -231,6 +231,9 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) } } +/* + * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx + */ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) { @@ -277,17 +280,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans, } /* - * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool - * - * If there are slots in the RX queue that need to be restocked, - * and we have free pre-allocated buffers, fill the ranks as much - * as we can, pulling from rx_free. - * - * This moves the 'write' index forward to catch up with 'processed', and - * also updates the memory address in the firmware to reference the new - * target buffer. + * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx */ -static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) +static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans, + struct iwl_rxq *rxq) { struct iwl_rx_mem_buffer *rxb; @@ -331,6 +327,26 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) } } +/* + * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool + * + * If there are slots in the RX queue that need to be restocked, + * and we have free pre-allocated buffers, fill the ranks as much + * as we can, pulling from rx_free. + * + * This moves the 'write' index forward to catch up with 'processed', and + * also updates the memory address in the firmware to reference the new + * target buffer. 
+ */ +static +void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq) +{ + if (trans->cfg->mq_rx_supported) + iwl_pcie_rxq_mq_restock(trans, rxq); + else + iwl_pcie_rxq_sq_restock(trans, rxq); +} + /* * iwl_pcie_rx_alloc_page - allocates and returns a page. * @@ -907,7 +923,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (trans->cfg->mq_rx_supported) { iwl_pcie_rx_mq_hw_init(trans); } else { - iwl_pcie_rxq_restock(trans, def_rxq); + iwl_pcie_rxq_sq_restock(trans, def_rxq); iwl_pcie_rx_hw_init(trans, def_rxq); } @@ -1222,24 +1238,13 @@ restart: count = 0; if (rxq->used_count < rxq->queue_size / 3) emergency = false; + + rxq->read = i; spin_unlock(&rxq->lock); iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq); - spin_lock(&rxq->lock); - } - } - /* handle restock for three cases, can be all of them at once: * - we just pulled buffers from the allocator * - we have 8+ unstolen pages accumulated * - we are in emergency and allocated buffers */ - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { - rxq->read = i; - spin_unlock(&rxq->lock); - if (trans->cfg->mq_rx_supported) - iwl_pcie_rxq_mq_restock(trans, rxq); - else iwl_pcie_rxq_restock(trans, rxq); - goto restart; + goto restart; + } } } out: @@ -1264,6 +1269,8 @@ out: if (rxq->napi.poll) napi_gro_flush(&rxq->napi, false); + + iwl_pcie_rxq_restock(trans, rxq); } static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) -- cgit v1.2.3 From 7d3ca7f4b1d5a43a1e0dbd112bb4c3bcf9a81fab Mon Sep 17 00:00:00 2001 From: Emmanuel Grumbach Date: Mon, 7 Mar 2016 10:16:38 +0200 Subject: iwlwifi: mvm: adapt the firmware assert log to new firmware Newer firmware versions put different data in the memory which is read by the driver upon firmware crash. Just change the variable names in the code and the name of the data in the log that we print without any functional change. On older firmware, there will be a mismatch between the names that are printed and the content itself, but that's harmless.
Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 8 +++---- .../wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h | 27 ++++++++++----------- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 28 +++++++++++----------- 3 files changed, 30 insertions(+), 33 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index f62c2d727ddb..85628127947f 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1652,10 +1652,10 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.line, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, - table.hw_ver, 0, table.brd_ver); + table.blink2, table.ilink1, table.ilink2, + table.bcon_time, table.gp1, table.gp2, + table.gp3, table.ucode_ver, table.hw_ver, + 0, table.brd_ver); IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); IWL_ERR(priv, "0x%08X | uPc\n", table.pc); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index 2a0703fcec56..f02e2c89abbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -2,6 +2,7 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -121,13 +122,12 @@ TRACE_EVENT(iwlwifi_dev_tx, TRACE_EVENT(iwlwifi_dev_ucode_error, TP_PROTO(const struct device *dev, u32 desc, u32 tsf_low, - u32 data1, u32 data2, u32 line, u32 blink1, - u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time, - u32 gp1, u32 gp2, u32 gp3, u32 major, u32 minor, u32 hw_ver, - u32 brd_ver), + u32 data1, u32 data2, u32 line, u32 blink2, u32 ilink1, + u32 ilink2, u32 bcon_time, u32 gp1, u32 gp2, u32 rev_type, + u32 major, u32 minor, u32 hw_ver, u32 brd_ver), TP_ARGS(dev, desc, tsf_low, data1, data2, line, - blink1, blink2, ilink1, ilink2, bcon_time, gp1, gp2, - gp3, major, minor, hw_ver, brd_ver), + blink2, ilink1, ilink2, bcon_time, gp1, gp2, + rev_type, major, minor, hw_ver, brd_ver), TP_STRUCT__entry( DEV_ENTRY __field(u32, desc) @@ -135,14 +135,13 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __field(u32, data1) __field(u32, data2) __field(u32, line) - __field(u32, blink1) __field(u32, blink2) __field(u32, ilink1) __field(u32, ilink2) __field(u32, bcon_time) __field(u32, gp1) __field(u32, gp2) - __field(u32, gp3) + __field(u32, rev_type) __field(u32, major) __field(u32, minor) __field(u32, hw_ver) @@ -155,29 +154,27 @@ TRACE_EVENT(iwlwifi_dev_ucode_error, __entry->data1 = data1; __entry->data2 = data2; __entry->line = line; - __entry->blink1 = blink1; __entry->blink2 = blink2; __entry->ilink1 = ilink1; __entry->ilink2 = ilink2; __entry->bcon_time = bcon_time; __entry->gp1 = gp1; __entry->gp2 = gp2; - __entry->gp3 = gp3; + __entry->rev_type = rev_type; __entry->major = major; __entry->minor = minor; __entry->hw_ver = hw_ver; __entry->brd_ver = brd_ver; ), TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, " - "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X " - "bcon_tm 
%010u gp 0x%08X 0x%08X 0x%08X major 0x%08X " + "blink2 0x%05X ilink 0x%05X 0x%05X " + "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X " "minor 0x%08X hw 0x%08X brd 0x%08X", __get_str(dev), __entry->desc, __entry->tsf_low, - __entry->data1, - __entry->data2, __entry->line, __entry->blink1, + __entry->data1, __entry->data2, __entry->line, __entry->blink2, __entry->ilink1, __entry->ilink2, __entry->bcon_time, __entry->gp1, __entry->gp2, - __entry->gp3, __entry->major, __entry->minor, + __entry->rev_type, __entry->major, __entry->minor, __entry->hw_ver, __entry->brd_ver) ); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 59453c176580..53cdc5760f68 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -376,8 +376,8 @@ struct iwl_error_event_table_v1 { struct iwl_error_event_table { u32 valid; /* (nonzero) valid, (0) log is empty */ u32 error_id; /* type of error */ - u32 pc; /* program counter */ - u32 blink1; /* branch link */ + u32 trm_hw_status0; /* TRM HW status */ + u32 trm_hw_status1; /* TRM HW status */ u32 blink2; /* branch link */ u32 ilink1; /* interrupt link */ u32 ilink2; /* interrupt link */ @@ -389,7 +389,7 @@ struct iwl_error_event_table { u32 tsf_hi; /* network timestamp function timer */ u32 gp1; /* GP1 timer register */ u32 gp2; /* GP2 timer register */ - u32 gp3; /* GP3 timer register */ + u32 fw_rev_type; /* firmware revision type */ u32 major; /* uCode version major */ u32 minor; /* uCode version minor */ u32 hw_ver; /* HW Silicon version */ @@ -408,7 +408,7 @@ struct iwl_error_event_table { * time_flag */ u32 isr4; /* isr status register LMPM_NIC_ISR4: * wico interrupt */ - u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */ + u32 last_cmd_id; /* last HCMD id handled by the firmware */ u32 wait_event; /* wait event() caller address */ u32 l2p_control; /* L2pControlField */ u32 l2p_duration; /* L2pDurationField */ @@ -419,7 +419,7 @@ struct iwl_error_event_table { u32 u_timestamp; /* indicate when the date and time of the * compilation */ u32 flow_handler; /* FH read/write pointers, RX credit */ -} __packed /* LOG_ERROR_TABLE_API_S_VER_2 */; +} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */; /* * UMAC error struct - relevant starting from family 8000 chip. 
@@ -529,9 +529,9 @@ static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm) trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.data3, - table.blink1, table.blink2, table.ilink1, - table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.ucode_ver, 0, + table.blink2, table.ilink1, table.ilink2, + table.bcon_time, table.gp1, table.gp2, + table.gp3, table.ucode_ver, 0, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); @@ -615,14 +615,14 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low, table.data1, table.data2, table.data3, - table.blink1, table.blink2, table.ilink1, + table.blink2, table.ilink1, table.ilink2, table.bcon_time, table.gp1, - table.gp2, table.gp3, table.major, + table.gp2, table.fw_rev_type, table.major, table.minor, table.hw_ver, table.brd_ver); IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id, desc_lookup(table.error_id)); - IWL_ERR(mvm, "0x%08X | uPc\n", table.pc); - IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1); + IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0); + IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1); IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2); IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1); IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2); @@ -634,7 +634,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi); IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1); IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2); - IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3); + IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type); IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major); IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor); IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver); @@ -645,7 +645,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm) IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2); IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3); IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4); - IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref); + IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id); IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event); IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control); IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration); -- cgit v1.2.3 From 81279c49ce472bf5900ef8b19bf91e2ad6bd1c34 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Wed, 9 Mar 2016 14:58:47 +0100 Subject: iwlwifi: mvm: don't try to offload AES-CMAC in AP/IBSS modes The firmware/hardware only supports checking AES-CMAC on RX, not using it on TX. For station mode this is fine, since it's the only thing it will ever do. For AP mode, it never receives such frames, but must be able to transmit them. This is currently broken since we try to enable them for hardware crypto (for RX only) and then treat them as TX_CMD_SEC_EXT, leading to FIFO underruns during TX so the frames never go out to the air. To fix this, simply use software on TX in AP (and IBSS) mode. 
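Condensed into a user-space sketch, the resulting group-key decision looks roughly like this; the cipher constants are the standard 802.11 suite selectors, the function and flag names are illustrative, and mac80211 is assumed to fall back to software crypto when the driver returns -EOPNOTSUPP.

#include <errno.h>
#include <stdio.h>

#define WLAN_CIPHER_SUITE_AES_CMAC 0x000fac06  /* 802.11 suite selector 00-0F-AC:6 */
#define WLAN_CIPHER_SUITE_CCMP     0x000fac04

/* Group keys on AP/IBSS are TX-only here; the hardware can only check
 * AES-CMAC on RX, so CMAC transmission has to stay in software. */
static int set_group_key_sketch(int vif_is_ap_or_ibss, unsigned int cipher)
{
    if (vif_is_ap_or_ibss && cipher == WLAN_CIPHER_SUITE_AES_CMAC)
        return -EOPNOTSUPP;  /* mac80211 then encrypts these frames itself */
    return 0;                /* other group ciphers: accept the TX-only key */
}

int main(void)
{
    printf("AP + CMAC: %d\n", set_group_key_sketch(1, WLAN_CIPHER_SUITE_AES_CMAC));
    printf("AP + CCMP: %d\n", set_group_key_sketch(1, WLAN_CIPHER_SUITE_CCMP));
    return 0;
}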
Signed-off-by: Johannes Berg Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 39b9c383c272..76e649c680a1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -2686,8 +2686,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, * GTK on AP interface is a TX-only key, return 0; * on IBSS they're per-station and because we're lazy * we don't support them for RX, so do the same. + * CMAC in AP/IBSS modes must be done in software. */ - ret = 0; + if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) + ret = -EOPNOTSUPP; + else + ret = 0; key->hw_key_idx = STA_KEY_IDX_INVALID; break; } -- cgit v1.2.3 From 5ed47226e06195935c5a2b88604b340a4364867e Mon Sep 17 00:00:00 2001 From: Ayala Beker Date: Wed, 3 Feb 2016 15:36:52 +0200 Subject: iwlwifi: mvm: update GSCAN capabilities Gscan capabilities were updated with new capabilities supported by the device. While at it, simplify the firmware support conditional and move both conditions into the WARN() to make it easier to understand and use the unlikely() for both. Signed-off-by: Ayala Beker Signed-off-by: Emmanuel Grumbach --- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 38 +++++++++++++++--------- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 11 +++++++ drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 13 ++++++++ 3 files changed, 48 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 184c0fef37c0..f899666acb41 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -7,6 +7,7 @@ * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved.
* * Redistribution and use in source and binary forms, with or without @@ -374,15 +376,12 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) return 0; } -static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, - const u32 len) +static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, + const u32 len) { struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; struct iwl_gscan_capabilities *capa = &fw->gscan_capa; - if (len < sizeof(*fw_capa)) - return -EINVAL; - capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); capa->max_ap_cache_per_scan = @@ -395,7 +394,15 @@ static int iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, le32_to_cpu(fw_capa->max_significant_change_aps); capa->max_bssid_history_entries = le32_to_cpu(fw_capa->max_bssid_history_entries); - return 0; + capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids); + capa->max_number_epno_networks = + le32_to_cpu(fw_capa->max_number_epno_networks); + capa->max_number_epno_networks_by_ssid = + le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid); + capa->max_number_of_white_listed_ssid = + le32_to_cpu(fw_capa->max_number_of_white_listed_ssid); + capa->max_number_of_black_listed_ssid = + le32_to_cpu(fw_capa->max_number_of_black_listed_ssid); } /* @@ -1023,8 +1030,15 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, le32_to_cpup((__le32 *)tlv_data); break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: - if (iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len)) - goto invalid_tlv_len; + /* + * Don't return an error in case of a shorter tlv_len + * to enable loading of FW that has an old format + * of GSCAN capabilities TLV. + */ + if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities)) + break; + + iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); gscan_capa = true; break; default: @@ -1046,12 +1060,8 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } - /* - * If ucode advertises that it supports GSCAN but GSCAN - * capabilities TLV is not present, warn and continue without GSCAN. - */ - if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && - WARN(!gscan_capa, + if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && + !gscan_capa, "GSCAN is supported but capabilities TLV is unavailable\n")) __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, capa->_capa); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h index 5f69bf5e04c7..15ec4e2907d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h @@ -809,6 +809,12 @@ struct iwl_fw_dbg_conf_tlv { * change APs. * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can * hold. + * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. + * @max_number_epno_networks: max number of epno entries. + * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is + * specified. + * @max_number_of_white_listed_ssid: max number of white listed SSIDs. + * @max_number_of_black_listed_ssid: max number of black listed SSIDs. 
*/ struct iwl_fw_gscan_capabilities { __le32 max_scan_cache_size; @@ -819,6 +825,11 @@ struct iwl_fw_gscan_capabilities { __le32 max_hotlist_aps; __le32 max_significant_change_aps; __le32 max_bssid_history_entries; + __le32 max_hotlist_ssids; + __le32 max_number_epno_networks; + __le32 max_number_epno_networks_by_ssid; + __le32 max_number_of_white_listed_ssid; + __le32 max_number_of_black_listed_ssid; } __packed; #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h index 85d6d6d55e2f..2942571c613f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -33,6 +34,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -205,6 +207,12 @@ struct iwl_fw_cscheme_list { * change APs. * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can * hold. + * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. + * @max_number_epno_networks: max number of epno entries. + * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is + * specified. + * @max_number_of_white_listed_ssid: max number of white listed SSIDs. + * @max_number_of_black_listed_ssid: max number of black listed SSIDs. */ struct iwl_gscan_capabilities { u32 max_scan_cache_size; @@ -215,6 +223,11 @@ struct iwl_gscan_capabilities { u32 max_hotlist_aps; u32 max_significant_change_aps; u32 max_bssid_history_entries; + u32 max_hotlist_ssids; + u32 max_number_epno_networks; + u32 max_number_epno_networks_by_ssid; + u32 max_number_of_white_listed_ssid; + u32 max_number_of_black_listed_ssid; }; /** -- cgit v1.2.3 From 87aca73737e379f079993802d2c43606f7c5d26c Mon Sep 17 00:00:00 2001 From: Jean Delvare Date: Thu, 21 Jan 2016 09:20:12 +0100 Subject: NFC: microread: Drop platform data header file Originally I only wanted to drop the unneeded inclusion of , but then noticed that struct microread_nfc_platform_data isn't actually used, and MICROREAD_DRIVER_NAME is redefined in the only file where it is used, so we can get rid of the header file and dead code altogether. 
Signed-off-by: Jean Delvare Cc: Lauro Ramos Venancio Cc: Aloisio Almeida Jr Signed-off-by: Samuel Ortiz --- MAINTAINERS | 1 - drivers/nfc/microread/i2c.c | 8 -------- include/linux/platform_data/microread.h | 35 --------------------------------- 3 files changed, 44 deletions(-) delete mode 100644 include/linux/platform_data/microread.h (limited to 'drivers') diff --git a/MAINTAINERS b/MAINTAINERS index 355e1c85bad6..5e4e50ff87bb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7644,7 +7644,6 @@ F: net/nfc/ F: include/net/nfc/ F: include/uapi/linux/nfc.h F: drivers/nfc/ -F: include/linux/platform_data/microread.h F: include/linux/platform_data/nfcmrvl.h F: include/linux/platform_data/nxp-nci.h F: include/linux/platform_data/pn544.h diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c index 918e8f2eac47..e0e8afd27849 100644 --- a/drivers/nfc/microread/i2c.c +++ b/drivers/nfc/microread/i2c.c @@ -246,18 +246,10 @@ static int microread_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct microread_i2c_phy *phy; - struct microread_nfc_platform_data *pdata = - dev_get_platdata(&client->dev); int r; dev_dbg(&client->dev, "client %p\n", client); - if (!pdata) { - nfc_err(&client->dev, "client %p: missing platform data\n", - client); - return -EINVAL; - } - phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy), GFP_KERNEL); if (!phy) diff --git a/include/linux/platform_data/microread.h b/include/linux/platform_data/microread.h deleted file mode 100644 index ca13992089b8..000000000000 --- a/include/linux/platform_data/microread.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Driver include for the Inside Secure microread NFC Chip. - * - * Copyright (C) 2011 Tieto Poland - * Copyright (C) 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#ifndef _MICROREAD_H -#define _MICROREAD_H - -#include - -#define MICROREAD_DRIVER_NAME "microread" - -/* board config platform data for microread */ -struct microread_nfc_platform_data { - unsigned int rst_gpio; - unsigned int irq_gpio; - unsigned int ioh_gpio; -}; - -#endif /* _MICROREAD_H */ -- cgit v1.2.3 From 99312c377f82f91e7754aefe42f64b619b295fd6 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 3 Mar 2016 20:45:55 +0100 Subject: can: ifi: Fix clock generator configuration The clock generation does not match reality when using the CAN IP core outside of the FPGA design. This patch fixes the computation of values which are programmed into the clock generator registers. First, there are some off-by-one errors which manifest themselves only when communicating with different controller, so those are fixed. Second, the bits in the clock generator registers have different meaning depending on whether the core is in ISO CANFD mode or any of the other modes (BOSCH CANFD or CAN2.0). Detect the ISO CANFD mode and fix handling of this special case of clock configuration. 
Finally, the CAN clock speed is in CANCLOCK register, not SYSCLOCK register, so fix this as well. Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Reviewed-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 44 ++++++++++++++++++----------------- 1 file changed, 23 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 0d1c164374b7..57581cb80956 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -514,25 +514,25 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id) static const struct can_bittiming_const ifi_canfd_bittiming_const = { .name = KBUILD_MODNAME, - .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 64, - .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ - .tseg2_max = 16, + .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 64, .sjw_max = 16, - .brp_min = 1, - .brp_max = 1024, + .brp_min = 2, + .brp_max = 256, .brp_inc = 1, }; static const struct can_bittiming_const ifi_canfd_data_bittiming_const = { .name = KBUILD_MODNAME, - .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ - .tseg1_max = 16, - .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ - .tseg2_max = 8, - .sjw_max = 4, - .brp_min = 1, - .brp_max = 32, + .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */ + .tseg1_max = 64, + .tseg2_min = 2, /* Time segment 2 = phase_seg2 */ + .tseg2_max = 64, + .sjw_max = 16, + .brp_min = 2, + .brp_max = 256, .brp_inc = 1, }; @@ -545,32 +545,34 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev) u32 noniso_arg = 0; u32 time_off; - if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) { + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && + !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) { + time_off = IFI_CANFD_TIME_SJW_OFF_ISO; + } else { noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH | IFI_CANFD_TIME_SET_TIMEA_BOSCH | IFI_CANFD_TIME_SET_PRESC_BOSCH | IFI_CANFD_TIME_SET_SJW_BOSCH; time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH; - } else { - time_off = IFI_CANFD_TIME_SJW_OFF_ISO; } /* Configure bit timing */ - brp = bt->brp - 1; + brp = bt->brp - 2; sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; - tseg2 = bt->phase_seg2 - 1; + tseg2 = bt->phase_seg2 - 2; writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) | (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) | (brp << IFI_CANFD_TIME_PRESCALE_OFF) | - (sjw << time_off), + (sjw << time_off) | + noniso_arg, priv->base + IFI_CANFD_TIME); /* Configure data bit timing */ - brp = dbt->brp - 1; + brp = dbt->brp - 2; sjw = dbt->sjw - 1; tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; - tseg2 = dbt->phase_seg2 - 1; + tseg2 = dbt->phase_seg2 - 2; writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) | (tseg1 << IFI_CANFD_TIME_TIMEA_OFF) | (brp << IFI_CANFD_TIME_PRESCALE_OFF) | @@ -847,7 +849,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev) priv->can.state = CAN_STATE_STOPPED; - priv->can.clock.freq = readl(addr + IFI_CANFD_SYSCLOCK); + priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK); priv->can.bittiming_const = &ifi_canfd_bittiming_const; priv->can.data_bittiming_const = &ifi_canfd_data_bittiming_const; -- cgit v1.2.3 From f1deaee0c3ab1950987d7207d613df0417bd251c Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 3 Mar 2016 20:45:56 +0100 Subject: can: ifi: Fix TX DLC configuration 
The TX DLC, the transmission length information, was not written into the transmit configuration register. When using the CAN core with different CAN controller, the receiving CAN controller will receive only the ID part of the CAN frame, but no data at all. This patch adds the TX DLC into the register to fix this issue. Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Reviewed-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 57581cb80956..fb93122a2bd3 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -749,8 +749,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, { struct ifi_canfd_priv *priv = netdev_priv(ndev); struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u32 txst, txid; - u32 txdlc = 0; + u32 txst, txid, txdlc; int i; if (can_dropped_invalid_skb(ndev, skb)) @@ -773,12 +772,11 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, txid = cf->can_id & CAN_SFF_MASK; } - if (priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) { - if (can_is_canfd_skb(skb)) { - txdlc |= IFI_CANFD_TXFIFO_DLC_EDL; - if (cf->flags & CANFD_BRS) - txdlc |= IFI_CANFD_TXFIFO_DLC_BRS; - } + txdlc = can_len2dlc(cf->len); + if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && can_is_canfd_skb(skb)) { + txdlc |= IFI_CANFD_TXFIFO_DLC_EDL; + if (cf->flags & CANFD_BRS) + txdlc |= IFI_CANFD_TXFIFO_DLC_BRS; } if (cf->can_id & CAN_RTR_FLAG) -- cgit v1.2.3 From 223654355c4eb35f890233e6a52b0f8d79bc3b44 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 3 Mar 2016 20:45:57 +0100 Subject: can: ifi: Fix RX and TX ID mask The RX and TX ID mask for CAN2.0 is 11 bits wide. This patch fixes the incorrect mask, which caused the CAN IDs to miss the MSBit both on receive and transmit. 
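As a quick sanity check of the widths involved (standard IDs are 11 bits, extended IDs 29 bits), the old 0x3ff mask drops bit 10 of every standard ID; this small standalone program is illustrative only, not driver code.

#include <stdio.h>

#define CAN_SFF_MASK 0x7ff        /* 11-bit standard frame ID */
#define CAN_EFF_MASK 0x1fffffff   /* 29-bit extended frame ID */

int main(void)
{
    unsigned int id = 0x5aa;      /* a standard ID that uses bit 10 */

    printf("with CAN_SFF_MASK: 0x%03x, with the old 0x3ff mask: 0x%03x\n",
           id & CAN_SFF_MASK, id & 0x3ff);   /* the MSBit is lost */
    return 0;
}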
Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Reviewed-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index fb93122a2bd3..aee43c0c631a 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -135,8 +135,8 @@ #define IFI_CANFD_RXFIFO_ID 0x6c #define IFI_CANFD_RXFIFO_ID_ID_OFFSET 0 -#define IFI_CANFD_RXFIFO_ID_ID_STD_MASK 0x3ff -#define IFI_CANFD_RXFIFO_ID_ID_XTD_MASK 0x1fffffff +#define IFI_CANFD_RXFIFO_ID_ID_STD_MASK CAN_SFF_MASK +#define IFI_CANFD_RXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK #define IFI_CANFD_RXFIFO_ID_IDE BIT(29) #define IFI_CANFD_RXFIFO_DATA 0x70 /* 0x70..0xac */ @@ -156,8 +156,8 @@ #define IFI_CANFD_TXFIFO_ID 0xbc #define IFI_CANFD_TXFIFO_ID_ID_OFFSET 0 -#define IFI_CANFD_TXFIFO_ID_ID_STD_MASK 0x3ff -#define IFI_CANFD_TXFIFO_ID_ID_XTD_MASK 0x1fffffff +#define IFI_CANFD_TXFIFO_ID_ID_STD_MASK CAN_SFF_MASK +#define IFI_CANFD_TXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK #define IFI_CANFD_TXFIFO_ID_IDE BIT(29) #define IFI_CANFD_TXFIFO_DATA 0xc0 /* 0xb0..0xfc */ -- cgit v1.2.3 From 6cc6426605b7edabde41c72add723979d1d509e7 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 3 Mar 2016 20:45:58 +0100 Subject: can: ifi: Add obscure bit swap for EFF frame IDs In case of a CAN2.0 EFF frame, the controller handles frame IDs in a rather bizarre way. The ID is split into an extended part, IDX[28:11], and a standard part, ID[10:0]. In the TX path, the core first sends the top 11 bits of the IDX, followed by ID and finally the rest of IDX. In the RX path, the core stores the ID in the LSbit part of the IDX field, followed by the LSbit parts of the real IDX. The MSbit parts of IDX are stored in the ID field of the register. This patch implements the necessary bit shuffling to mitigate this obscure behavior. In case two of these controllers are connected together, the RX and TX bit swapping nullifies itself and the issue does not manifest. The issue only manifests when talking to another different CAN controller.
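The RX-side unswap can be sketched in isolation like this, with the 11/18-bit split written out as plain numbers instead of the driver's macros (illustrative only, not the driver's code).

#include <stdio.h>

/* The register keeps the 11-bit field in the low bits and the 18-bit field
 * above it; the CAN EFF ID wants them the other way around. */
static unsigned int unswap_eff_id(unsigned int reg)
{
    return ((reg >> 11) & 0x3ffff) |   /* high 18 bits -> ID[17:0] */
           ((reg & 0x7ff) << 18);      /* low 11 bits  -> ID[28:18] */
}

int main(void)
{
    unsigned int reg = (0x12345u << 11) | 0x5aau;  /* as read from RXFIFO_ID */

    printf("register 0x%08x -> EFF id 0x%08x\n", reg, unswap_eff_id(reg));
    return 0;
}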
Signed-off-by: Marek Vasut Cc: Marc Kleine-Budde Cc: Mark Rutland Cc: Oliver Hartkopp Cc: Wolfgang Grandegger Reviewed-by: Oliver Hartkopp Signed-off-by: Marc Kleine-Budde --- drivers/net/can/ifi_canfd/ifi_canfd.c | 31 +++++++++++++++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index aee43c0c631a..a1bd54ffd31e 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -136,7 +136,11 @@ #define IFI_CANFD_RXFIFO_ID 0x6c #define IFI_CANFD_RXFIFO_ID_ID_OFFSET 0 #define IFI_CANFD_RXFIFO_ID_ID_STD_MASK CAN_SFF_MASK +#define IFI_CANFD_RXFIFO_ID_ID_STD_OFFSET 0 +#define IFI_CANFD_RXFIFO_ID_ID_STD_WIDTH 10 #define IFI_CANFD_RXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK +#define IFI_CANFD_RXFIFO_ID_ID_XTD_OFFSET 11 +#define IFI_CANFD_RXFIFO_ID_ID_XTD_WIDTH 18 #define IFI_CANFD_RXFIFO_ID_IDE BIT(29) #define IFI_CANFD_RXFIFO_DATA 0x70 /* 0x70..0xac */ @@ -157,7 +161,11 @@ #define IFI_CANFD_TXFIFO_ID 0xbc #define IFI_CANFD_TXFIFO_ID_ID_OFFSET 0 #define IFI_CANFD_TXFIFO_ID_ID_STD_MASK CAN_SFF_MASK +#define IFI_CANFD_TXFIFO_ID_ID_STD_OFFSET 0 +#define IFI_CANFD_TXFIFO_ID_ID_STD_WIDTH 10 #define IFI_CANFD_TXFIFO_ID_ID_XTD_MASK CAN_EFF_MASK +#define IFI_CANFD_TXFIFO_ID_ID_XTD_OFFSET 11 +#define IFI_CANFD_TXFIFO_ID_ID_XTD_WIDTH 18 #define IFI_CANFD_TXFIFO_ID_IDE BIT(29) #define IFI_CANFD_TXFIFO_DATA 0xc0 /* 0xb0..0xfc */ @@ -229,10 +237,20 @@ static void ifi_canfd_read_fifo(struct net_device *ndev) rxid = readl(priv->base + IFI_CANFD_RXFIFO_ID); id = (rxid >> IFI_CANFD_RXFIFO_ID_ID_OFFSET); - if (id & IFI_CANFD_RXFIFO_ID_IDE) + if (id & IFI_CANFD_RXFIFO_ID_IDE) { id &= IFI_CANFD_RXFIFO_ID_ID_XTD_MASK; - else + /* + * In case the Extended ID frame is received, the standard + * and extended part of the ID are swapped in the register, + * so swap them back to obtain the correct ID. + */ + id = (id >> IFI_CANFD_RXFIFO_ID_ID_XTD_OFFSET) | + ((id & IFI_CANFD_RXFIFO_ID_ID_STD_MASK) << + IFI_CANFD_RXFIFO_ID_ID_XTD_WIDTH); + id |= CAN_EFF_FLAG; + } else { id &= IFI_CANFD_RXFIFO_ID_ID_STD_MASK; + } cf->can_id = id; if (rxdlc & IFI_CANFD_RXFIFO_DLC_ESI) { @@ -767,6 +785,15 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, if (cf->can_id & CAN_EFF_FLAG) { txid = cf->can_id & CAN_EFF_MASK; + /* + * In case the Extended ID frame is transmitted, the + * standard and extended part of the ID are swapped + * in the register, so swap them back to send the + * correct ID. + */ + txid = (txid >> IFI_CANFD_TXFIFO_ID_ID_XTD_WIDTH) | + ((txid & IFI_CANFD_TXFIFO_ID_ID_XTD_MASK) << + IFI_CANFD_TXFIFO_ID_ID_XTD_OFFSET); txid |= IFI_CANFD_TXFIFO_ID_IDE; } else { txid = cf->can_id & CAN_SFF_MASK; -- cgit v1.2.3 From e481ab23c57b37c989fa27e0a6b3e941a908775a Mon Sep 17 00:00:00 2001 From: Ramesh Shanmugasundaram Date: Mon, 29 Feb 2016 12:37:25 +0000 Subject: can: rcar_can: Add r8a7795 support Added r8a7795 SoC support. 
Signed-off-by: Ramesh Shanmugasundaram Acked-by: Rob Herring Signed-off-by: Marc Kleine-Budde --- Documentation/devicetree/bindings/net/can/rcar_can.txt | 11 +++++++++++ drivers/net/can/Kconfig | 2 +- drivers/net/can/rcar_can.c | 1 + 3 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt index 65edc055722f..8d40ab27bc8c 100644 --- a/Documentation/devicetree/bindings/net/can/rcar_can.txt +++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt @@ -9,8 +9,10 @@ Required properties: "renesas,can-r8a7792" if CAN controller is a part of R8A7792 SoC. "renesas,can-r8a7793" if CAN controller is a part of R8A7793 SoC. "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC. + "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC. "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device. "renesas,rcar-gen2-can" for a generic R-Car Gen2 compatible device. + "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device. When compatible with the generic version, nodes must list the SoC-specific version corresponding to the platform first followed by the generic version. @@ -22,6 +24,15 @@ Required properties: - pinctrl-0: pin control group to be used for this controller. - pinctrl-names: must be "default". +Required properties for "renesas,can-r8a7795" compatible: +In R8A7795 SoC, "clkp2" can be CANFD clock. This is a div6 clock and can be +used by both CAN and CAN FD controller at the same time. It needs to be scaled +to maximum frequency if any of these controllers use it. This is done using +the below properties. + +- assigned-clocks: phandle of clkp2(CANFD) clock. +- assigned-clock-rates: maximum frequency of this clock. + Optional properties: - renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are: <0x0> (default) : Peripheral clock (clkp1) diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 164ccdeca663..0d40aef928e2 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -106,7 +106,7 @@ config CAN_JANZ_ICAN3 config CAN_RCAR tristate "Renesas R-Car CAN controller" - depends on ARM + depends on ARCH_RENESAS || ARM ---help--- Say Y here if you want to use CAN controller found on Renesas R-Car SoCs. diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c index ad3d2e0cb191..788459f6bf5c 100644 --- a/drivers/net/can/rcar_can.c +++ b/drivers/net/can/rcar_can.c @@ -906,6 +906,7 @@ static const struct of_device_id rcar_can_of_table[] __maybe_unused = { { .compatible = "renesas,can-r8a7791" }, { .compatible = "renesas,rcar-gen1-can" }, { .compatible = "renesas,rcar-gen2-can" }, + { .compatible = "renesas,rcar-gen3-can" }, { } }; MODULE_DEVICE_TABLE(of, rcar_can_of_table); -- cgit v1.2.3 From 3691ac4a9c957c2896f8ebfb8a92081301f1682e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 8 Mar 2016 15:04:08 +0300 Subject: libertas: fix an error code in probe We accidentally return success instead of a negative error code. 
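The underlying pattern, reduced to a self-contained sketch with made-up function names: keep the callee's return value so the error, rather than a stale 0, propagates out of the error path.

#include <errno.h>
#include <stdio.h>

static int register_device(void)   /* stand-in that always fails */
{
    return -ENODEV;
}

static int probe_buggy(void)
{
    int ret = 0;

    if (register_device())   /* result tested but thrown away */
        goto done;
done:
    return ret;              /* still 0: the caller thinks probe worked */
}

static int probe_fixed(void)
{
    int ret = register_device();   /* keep the error code */

    if (ret)
        goto done;
done:
    return ret;              /* propagates -ENODEV */
}

int main(void)
{
    printf("buggy %d, fixed %d\n", probe_buggy(), probe_fixed());
    return 0;
}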
Signed-off-by: Dan Carpenter Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/libertas/main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index b35b8bcce24c..8541cbed786d 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -1118,7 +1118,8 @@ int lbs_start_card(struct lbs_private *priv) else pr_info("%s: mesh disabled\n", dev->name); - if (lbs_cfg_register(priv)) { + ret = lbs_cfg_register(priv); + if (ret) { pr_err("cannot register device\n"); goto done; } -- cgit v1.2.3 From 107b87133361aa5502d7d3c82cdfff77bf8ebe18 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Wed, 9 Mar 2016 15:25:26 +0800 Subject: brcmfmac: Remove waitqueue_active check We met a problem of pm_suspend when repeated closing/opening the lid on a Lenovo laptop (1/20 reproduce rate), below is the log: [ 199.735876] PM: Entering mem sleep [ 199.750516] e1000e: EEE TX LPI TIMER: 00000011 [ 199.856638] Trying to free nonexistent resource <000000000000d000-000000000000d0ff> [ 201.753566] brcmfmac: brcmf_pcie_suspend: Timeout on response for entering D3 substate [ 201.753581] pci_legacy_suspend(): brcmf_pcie_suspend+0x0/0x1f0 [brcmfmac] returns -5 [ 201.753585] dpm_run_callback(): pci_pm_suspend+0x0/0x160 returns -5 [ 201.753589] PM: Device 0000:04:00.0 failed to suspend async: error -5 Through debugging, we found when problem happens, it is not the device fails to enter D3, but the signal D3_ACK comes too early to pass the waitqueue_active() check. Just like this: brcmf_pcie_send_mb_data(devinfo, BRCMF_H2D_HOST_D3_INFORM); // signal is triggered here wait_event_timeout(devinfo->mbdata_resp_wait, devinfo->mbdata_completed, BRCMF_PCIE_MBDATA_TIMEOUT); So far I think it is safe to remove waitqueue_active check since there is only one place to trigger this signal (sending BRCMF_H2D_HOST_D3_INFORM). And it is not a problem calling wake_up event earlier than calling wait_event. Cc: Brett Rudley Cc: Hante Meuleman Cc: Franky (Zhenhui) Lin Cc: Pieter-Paul Giesberts Cc: Arend van Spriel Signed-off-by: Hui Wang Signed-off-by: Kalle Valo --- drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 52fef5e1d615..0af8db82da0c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -677,10 +677,8 @@ static void brcmf_pcie_handle_mb_data(struct brcmf_pciedev_info *devinfo) brcmf_dbg(PCIE, "D2H_MB_DATA: DEEP SLEEP EXIT\n"); if (dtoh_mb_data & BRCMF_D2H_DEV_D3_ACK) { brcmf_dbg(PCIE, "D2H_MB_DATA: D3 ACK\n"); - if (waitqueue_active(&devinfo->mbdata_resp_wait)) { - devinfo->mbdata_completed = true; - wake_up(&devinfo->mbdata_resp_wait); - } + devinfo->mbdata_completed = true; + wake_up(&devinfo->mbdata_resp_wait); } } -- cgit v1.2.3 From 54f008497b9f09f0880dd84ce5608be4ad7cff13 Mon Sep 17 00:00:00 2001 From: Amitkumar Karwar Date: Wed, 9 Mar 2016 04:21:11 -0800 Subject: mwifiex: Empty Tx queue during suspend In cfg80211 suspend handler, stop the netif queue and wait until all the Tx queues become empty. Start the queues in resume handler. 
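A rough user-space model of the added drain logic: stop intake, then poll the pending queues a bounded number of times with a short sleep. The 10 retries and roughly 10 ms sleep follow the patch; everything else here is an illustrative stand-in.

#include <stdio.h>
#include <unistd.h>

static int tx_pending = 3;   /* pretend a few frames are still queued */

static int queues_empty(void)
{
    if (tx_pending > 0)
        tx_pending--;        /* the hardware drains one frame per poll */
    return tx_pending == 0;
}

static void drain_before_suspend(void)
{
    int i, retry_num = 10;

    /* the netif queues are already stopped, so nothing new arrives */
    for (i = 0; i < retry_num; i++) {
        if (queues_empty())
            break;
        usleep(10000);       /* the driver uses usleep_range(10000, 15000) */
    }
    printf("drained after %d polls, pending %d\n", i, tx_pending);
}

int main(void)
{
    drain_before_suspend();
    return 0;
}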
Signed-off-by: Amitkumar Karwar Signed-off-by: Kalle Valo --- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 28 ++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index 29b7f6eed240..bb7235e1b9d1 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -20,6 +20,7 @@ #include "cfg80211.h" #include "main.h" #include "11n.h" +#include "wmm.h" static char *reg_alpha2; module_param(reg_alpha2, charp, 0); @@ -3259,7 +3260,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, { struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); struct mwifiex_ds_hs_cfg hs_cfg; - int i, ret = 0; + int i, ret = 0, retry_num = 10; struct mwifiex_private *priv; for (i = 0; i < adapter->priv_num; i++) { @@ -3269,6 +3270,21 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, mwifiex_cancel_all_pending_cmd(adapter); + for (i = 0; i < adapter->priv_num; i++) { + priv = adapter->priv[i]; + if (priv && priv->netdev) + mwifiex_stop_net_dev_queue(priv->netdev, adapter); + } + + for (i = 0; i < retry_num; i++) { + if (!mwifiex_wmm_lists_empty(adapter) || + !mwifiex_bypass_txlist_empty(adapter) || + !skb_queue_empty(&adapter->tx_data_q)) + usleep_range(10000, 15000); + else + break; + } + if (!wowlan) { mwifiex_dbg(adapter, ERROR, "None of the WOWLAN triggers enabled\n"); @@ -3321,12 +3337,18 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy, static int mwifiex_cfg80211_resume(struct wiphy *wiphy) { struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy); - struct mwifiex_private *priv = - mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); + struct mwifiex_private *priv; struct mwifiex_ds_wakeup_reason wakeup_reason; struct cfg80211_wowlan_wakeup wakeup_report; int i; + for (i = 0; i < adapter->priv_num; i++) { + priv = adapter->priv[i]; + if (priv && priv->netdev) + mwifiex_wake_up_net_dev_queue(priv->netdev, adapter); + } + + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA); mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD, &wakeup_reason); memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup)); -- cgit v1.2.3 From 87cba1696045cbf963d09c82ceca98db92b7ace9 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Mon, 7 Mar 2016 00:28:08 +0200 Subject: wlcore: don't WARN_ON in case of existing ROC When working with AP + P2P, it's possible to get into a state when the AP is in ROC (due to assiciating station) while trying to ROC on the P2P interface. Replace the WARN_ON with wl1271_error to avoid warnings in this case. 
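Since an active ROC here is a legitimate runtime state rather than a driver bug, a log line plus -EBUSY is the appropriate response. The detection itself is the usual bitmap idiom; a minimal sketch with a hypothetical role count, not the wlcore code:

#include <linux/bitops.h>

#define DEMO_MAX_ROLES	16

/* True if any role currently owns a remain-on-channel. */
static bool demo_roc_in_progress(const unsigned long *roc_map)
{
	/*
	 * find_first_bit() returns DEMO_MAX_ROLES when no bit is set,
	 * so any smaller return value names the role holding the ROC
	 * and can be reported instead of tripping a WARN_ON.
	 */
	return find_first_bit(roc_map, DEMO_MAX_ROLES) < DEMO_MAX_ROLES;
}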
Signed-off-by: Eliad Peller Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wlcore/main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 45662cf3169f..dde36203ca42 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -5495,7 +5495,7 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, { struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif); struct wl1271 *wl = hw->priv; - int channel, ret = 0; + int channel, active_roc, ret = 0; channel = ieee80211_frequency_to_channel(chan->center_freq); @@ -5508,9 +5508,9 @@ static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw, goto out; /* return EBUSY if we can't ROC right now */ - if (WARN_ON(wl->roc_vif || - find_first_bit(wl->roc_map, - WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) { + active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES); + if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) { + wl1271_warning("active roc on role %d", active_roc); ret = -EBUSY; goto out; } -- cgit v1.2.3 From 8cf77e176f921d20a1e61d288eaee74a2d1d2ae1 Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Mon, 7 Mar 2016 00:28:09 +0200 Subject: wlcore/wl18xx: add radar_debug_mode handling Add debugfs key (under CFG80211_CERTIFICATION_ONUS configuration) to set/clear radar_debug_mode. In this mode, the driver simply ignores radar events (but prints them). The fw is notified about this mode through a special generic_cfg_feature command. This mode is relevant only for ap mode. look for it when initializing ap vif. Signed-off-by: Eliad Peller Signed-off-by: Kalle Valo --- drivers/net/wireless/ti/wl18xx/debugfs.c | 66 ++++++++++++++++++++++++++++++++ drivers/net/wireless/ti/wl18xx/event.c | 3 +- drivers/net/wireless/ti/wlcore/init.c | 5 +++ drivers/net/wireless/ti/wlcore/wlcore.h | 1 + 4 files changed, 74 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c index 4edfe28395f0..86ccf84ea0c6 100644 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c @@ -345,6 +345,69 @@ static const struct file_operations dynamic_fw_traces_ops = { .llseek = default_llseek, }; +#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS +static ssize_t radar_debug_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + struct wl12xx_vif *wlvif; + unsigned long value; + int ret; + + ret = kstrtoul_from_user(user_buf, count, 10, &value); + if (ret < 0) { + wl1271_warning("illegal radar_debug_mode value!"); + return -EINVAL; + } + + /* valid values: 0/1 */ + if (!(value == 0 || value == 1)) { + wl1271_warning("value is not in valid!"); + return -EINVAL; + } + + mutex_lock(&wl->mutex); + + wl->radar_debug_mode = value; + + if (unlikely(wl->state != WLCORE_STATE_ON)) + goto out; + + ret = wl1271_ps_elp_wakeup(wl); + if (ret < 0) + goto out; + + wl12xx_for_each_wlvif_ap(wl, wlvif) { + wlcore_cmd_generic_cfg(wl, wlvif, + WLCORE_CFG_FEATURE_RADAR_DEBUG, + wl->radar_debug_mode, 0); + } + + wl1271_ps_elp_sleep(wl); +out: + mutex_unlock(&wl->mutex); + return count; +} + +static ssize_t radar_debug_mode_read(struct file *file, + char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct wl1271 *wl = file->private_data; + + return wl1271_format_buffer(userbuf, count, ppos, + "%d\n", wl->radar_debug_mode); +} 
+ +static const struct file_operations radar_debug_mode_ops = { + .write = radar_debug_mode_write, + .read = radar_debug_mode_read, + .open = simple_open, + .llseek = default_llseek, +}; +#endif /* CFG80211_CERTIFICATION_ONUS */ + int wl18xx_debugfs_add_files(struct wl1271 *wl, struct dentry *rootdir) { @@ -510,6 +573,9 @@ int wl18xx_debugfs_add_files(struct wl1271 *wl, DEBUGFS_ADD(conf, moddir); DEBUGFS_ADD(radar_detection, moddir); +#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS + DEBUGFS_ADD(radar_debug_mode, moddir); +#endif DEBUGFS_ADD(dynamic_fw_traces, moddir); return 0; diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c index 719907a0a2c2..ff6e46dd61f8 100644 --- a/drivers/net/wireless/ti/wl18xx/event.c +++ b/drivers/net/wireless/ti/wl18xx/event.c @@ -146,7 +146,8 @@ int wl18xx_process_mailbox_events(struct wl1271 *wl) mbox->radar_channel, wl18xx_radar_type_decode(mbox->radar_type)); - ieee80211_radar_detected(wl->hw); + if (!wl->radar_debug_mode) + ieee80211_radar_detected(wl->hw); } if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) { diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c index e92f2639af2c..d0b7734030ef 100644 --- a/drivers/net/wireless/ti/wlcore/init.c +++ b/drivers/net/wireless/ti/wlcore/init.c @@ -558,6 +558,11 @@ static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) if (ret < 0) return ret; + if (wl->radar_debug_mode) + wlcore_cmd_generic_cfg(wl, wlvif, + WLCORE_CFG_FEATURE_RADAR_DEBUG, + wl->radar_debug_mode, 0); + return 0; } diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h index dda01b118c26..72c31a8edcfb 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore.h +++ b/drivers/net/wireless/ti/wlcore/wlcore.h @@ -463,6 +463,7 @@ struct wl1271 { /* the current dfs region */ enum nl80211_dfs_regions dfs_region; + bool radar_debug_mode; /* size of the private FW status data */ size_t fw_status_len; -- cgit v1.2.3 From 3307d84024d2ed9c8aee0a4bfa694319613734bf Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:03:59 -0500 Subject: rtl8xxxu: Add initial code to parse rtl8192eu efuse This is the start of 8192eu support. For now just detect the device and parse the efuse. 
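The efuse is read out as a fixed-layout byte map, so parsing it amounts to overlaying a packed struct on the raw buffer, validating the little-endian RTL ID, and copying the per-chip fields out. A cut-down sketch of that pattern follows (hypothetical struct, not the full 8192eu layout):

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/types.h>

#define EFUSE_MAP_LEN	512

/* Hypothetical cut-down layout -- the real rtl8192eu_efuse is larger. */
struct demo_efuse {
	__le16 rtl_id;			/* must read 0x8129 */
	u8 pad[0xd5];
	u8 mac_addr[ETH_ALEN];		/* lands at offset 0xd7 */
} __packed;

static int demo_parse_efuse(const u8 raw[EFUSE_MAP_LEN], u8 mac_out[ETH_ALEN])
{
	const struct demo_efuse *ef = (const void *)raw;

	/*
	 * The ID is stored little-endian in the efuse, so compare against
	 * the byte-swapped constant rather than the host-order value.
	 */
	if (ef->rtl_id != cpu_to_le16(0x8129))
		return -EINVAL;

	ether_addr_copy(mac_out, ef->mac_addr);
	return 0;
}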
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 80 +++++++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 50 ++++++++++++++- 2 files changed, 124 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e654bd33b434..bb95d00f0565 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -42,7 +42,7 @@ #define DRIVER_NAME "rtl8xxxu" -static int rtl8xxxu_debug; +static int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE; static bool rtl8xxxu_ht40_2g; MODULE_AUTHOR("Jes Sorensen "); @@ -1890,6 +1890,53 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) #endif +static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) +{ + int i; + + if (priv->efuse_wifi.efuse8192eu.rtl_id != cpu_to_le16(0x8129)) + return -EINVAL; + + ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192eu.mac_addr); + + memcpy(priv->cck_tx_power_index_A, + priv->efuse_wifi.efuse8192eu.cck_tx_power_index_A, + sizeof(priv->cck_tx_power_index_A)); + memcpy(priv->cck_tx_power_index_B, + priv->efuse_wifi.efuse8192eu.cck_tx_power_index_B, + sizeof(priv->cck_tx_power_index_B)); + + memcpy(priv->ht40_1s_tx_power_index_A, + priv->efuse_wifi.efuse8192eu.ht40_1s_tx_power_index_A, + sizeof(priv->ht40_1s_tx_power_index_A)); + memcpy(priv->ht40_1s_tx_power_index_B, + priv->efuse_wifi.efuse8192eu.ht40_1s_tx_power_index_B, + sizeof(priv->ht40_1s_tx_power_index_B)); + + dev_info(&priv->udev->dev, "Vendor: %.7s\n", + priv->efuse_wifi.efuse8192eu.vendor_name); + dev_info(&priv->udev->dev, "Product: %.11s\n", + priv->efuse_wifi.efuse8192eu.device_name); + dev_info(&priv->udev->dev, "Serial: %.11s\n", + priv->efuse_wifi.efuse8192eu.serial); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) { + unsigned char *raw = priv->efuse_wifi.raw; + + dev_info(&priv->udev->dev, + "%s: dumping efuse (0x%02zx bytes):\n", + __func__, sizeof(struct rtl8192eu_efuse)); + for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) { + dev_info(&priv->udev->dev, "%02x: " + "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, + raw[i], raw[i + 1], raw[i + 2], + raw[i + 3], raw[i + 4], raw[i + 5], + raw[i + 6], raw[i + 7]); + } + } + return -EINVAL; +} + static int rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data) { @@ -1973,7 +2020,7 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) } /* Default value is 0xff */ - memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN_8723A); + memset(priv->efuse_wifi.raw, 0xff, EFUSE_MAP_LEN); efuse_addr = 0; while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) { @@ -2005,7 +2052,7 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) /* We have 8 bits to indicate validity */ map_addr = offset * 8; - if (map_addr >= EFUSE_MAP_LEN_8723A) { + if (map_addr >= EFUSE_MAP_LEN) { dev_warn(dev, "%s: Illegal map_addr (%04x), " "efuse corrupt!\n", __func__, map_addr); @@ -2286,6 +2333,24 @@ static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv) #endif +static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv) +{ + char *fw_name; + int ret; + + return -EBUSY; + if (!priv->vendor_umc) + fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; + else if (priv->chip_cut || priv->rtlchip == 0x8192c) + fw_name = "rtlwifi/rtl8192cufw_B.bin"; + else + fw_name = "rtlwifi/rtl8192cufw_A.bin"; + + ret = rtl8xxxu_load_firmware(priv, fw_name); + + return ret; +} + static 
void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv) { u16 val16; @@ -5863,6 +5928,13 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { #endif +static struct rtl8xxxu_fileops rtl8192eu_fops = { + .parse_efuse = rtl8192eu_parse_efuse, + .load_firmware = rtl8192eu_load_firmware, + .power_on = rtl8192cu_power_on, + .writeN_block_size = 128, +}; + static struct usb_device_id dev_table[] = { {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8723au_fops}, @@ -5870,6 +5942,8 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8723au_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x0724, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8723au_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8192eu_fops}, #ifdef CONFIG_RTL8XXXU_UNTESTED /* Still supported by rtlwifi */ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8176, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index bbd0f6b76b82..8168a35c314c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -55,8 +55,8 @@ #define RTL8723A_MAX_RF_PATHS 2 #define RF6052_MAX_TX_PWR 0x3f -#define EFUSE_MAP_LEN_8723A 256 -#define EFUSE_MAX_SECTION_8723A 32 +#define EFUSE_MAP_LEN 512 +#define EFUSE_MAX_SECTION_8723A 64 #define EFUSE_REAL_CONTENT_LEN_8723A 512 #define EFUSE_BT_MAP_LEN_8723A 1024 #define EFUSE_MAX_WORD_UNIT 4 @@ -500,6 +500,49 @@ struct rtl8192cu_efuse { u8 customer_id; }; +struct rtl8192eu_efuse { + __le16 rtl_id; + u8 res0[0x0e]; + u8 cck_tx_power_index_A[3]; /* 0x10 */ + u8 cck_tx_power_index_B[3]; + u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ + u8 ht40_1s_tx_power_index_B[3]; + u8 res1[0x9c]; + u8 channel_plan; /* 0xb8 */ + u8 xtal_k; + u8 thermal_meter; + u8 iqk_lck; + u8 pa_type; /* 0xbc */ + u8 lna_type_2g; /* 0xbd */ + u8 res2[1]; + u8 lna_type_5g; /* 0xbf */ + u8 res13[1]; + u8 rf_board_option; + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; + u8 res3[3]; + u8 rf_antenna_option; /* 0xc9 */ + u8 res4[6]; + u8 vid; /* 0xd0 */ + u8 res5[1]; + u8 pid; /* 0xd2 */ + u8 res6[1]; + u8 usb_optional_function; + u8 res7[2]; + u8 mac_addr[ETH_ALEN]; /* 0xd7 */ + u8 res8[2]; + u8 vendor_name[7]; + u8 res9[2]; + u8 device_name[0x0b]; /* 0xe8 */ + u8 res10[2]; + u8 serial[0x0b]; /* 0xf5 */ + u8 res11[0x30]; + u8 unknown[0x0d]; /* 0x130 */ + u8 res12[0xc3]; +}; + struct rtl8xxxu_reg8val { u16 reg; u8 val; @@ -643,9 +686,10 @@ struct rtl8xxxu_priv { u8 val8; } usb_buf; union { - u8 raw[EFUSE_MAP_LEN_8723A]; + u8 raw[EFUSE_MAP_LEN]; struct rtl8723au_efuse efuse8723; struct rtl8192cu_efuse efuse8192; + struct rtl8192eu_efuse efuse8192eu; } efuse_wifi; u32 adda_backup[RTL8XXXU_ADDA_REGS]; u32 mac_backup[RTL8XXXU_MAC_REGS]; -- cgit v1.2.3 From 0e5d435a619fadb475f3a260eda2252acd1662f0 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:00 -0500 Subject: rtl8xxxu: Identify chip vendors correctly This identifies the chip vendors correctly and also picks the correct firmware for rtl8192eu. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 73 +++++++++++++++++----- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 4 ++ 3 files changed, 64 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index bb95d00f0565..745942299f41 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1662,16 +1662,24 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) case 1: cut = "B"; break; + case 2: + cut = "C"; + break; + case 3: + cut = "D"; + break; + case 4: + cut = "E"; + break; default: cut = "unknown"; } dev_info(dev, "RTL%s rev %s (%s) %iT%iR, TX queues %i, WiFi=%i, BT=%i, GPS=%i, HI PA=%i\n", - priv->chip_name, cut, priv->vendor_umc ? "UMC" : "TSMC", - priv->tx_paths, priv->rx_paths, priv->ep_tx_count, - priv->has_wifi, priv->has_bluetooth, priv->has_gps, - priv->hi_pa); + priv->chip_name, cut, priv->chip_vendor, priv->tx_paths, + priv->rx_paths, priv->ep_tx_count, priv->has_wifi, + priv->has_bluetooth, priv->has_gps, priv->hi_pa); dev_info(dev, "RTL%s MAC: %pM\n", priv->chip_name, priv->mac_addr); } @@ -1708,7 +1716,21 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } else if (val32 & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; - if (bonding == HPON_FSM_BONDING_1T2R) { + if (priv->chip_cut >= 3) { + if (bonding == HPON_FSM_BONDING_1T2R) { + sprintf(priv->chip_name, "8191EU"); + priv->rf_paths = 2; + priv->rx_paths = 2; + priv->tx_paths = 1; + priv->rtlchip = 0x8191e; + } else { + sprintf(priv->chip_name, "8192EU"); + priv->rf_paths = 2; + priv->rx_paths = 2; + priv->tx_paths = 2; + priv->rtlchip = 0x8192e; + } + } else if (bonding == HPON_FSM_BONDING_1T2R) { sprintf(priv->chip_name, "8191CU"); priv->rf_paths = 2; priv->rx_paths = 2; @@ -1731,8 +1753,34 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->has_wifi = 1; } - if (val32 & SYS_CFG_VENDOR_ID) - priv->vendor_umc = 1; + switch (priv->rtlchip) { + case 0x8188e: + case 0x8192e: + case 0x8723b: + switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { + case SYS_CFG_VENDOR_ID_TSMC: + sprintf(priv->chip_vendor, "TSMC"); + break; + case SYS_CFG_VENDOR_ID_SMIC: + sprintf(priv->chip_vendor, "SMIC"); + priv->vendor_smic = 1; + break; + case SYS_CFG_VENDOR_ID_UMC: + sprintf(priv->chip_vendor, "UMC"); + priv->vendor_umc = 1; + break; + default: + sprintf(priv->chip_vendor, "unknown"); + } + break; + default: + if (val32 & SYS_CFG_VENDOR_ID) { + sprintf(priv->chip_vendor, "UMC"); + priv->vendor_umc = 1; + } else { + sprintf(priv->chip_vendor, "TSMC"); + } + } val32 = rtl8xxxu_read32(priv, REG_GPIO_OUTSTS); priv->rom_rev = (val32 & GPIO_RF_RL_ID) >> 28; @@ -1934,7 +1982,7 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) raw[i + 6], raw[i + 7]); } } - return -EINVAL; + return 0; } static int @@ -2269,6 +2317,7 @@ static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name) signature = le16_to_cpu(priv->fw_data->signature); switch (signature & 0xfff0) { + case 0x92e0: case 0x92c0: case 0x88c0: case 0x2300: @@ -2338,13 +2387,7 @@ static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv) char *fw_name; int ret; - return -EBUSY; - if (!priv->vendor_umc) - fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; - else if (priv->chip_cut || 
priv->rtlchip == 0x8192c) - fw_name = "rtlwifi/rtl8192cufw_B.bin"; - else - fw_name = "rtlwifi/rtl8192cufw_A.bin"; + fw_name = "rtlwifi/rtl8192eu_nic.bin"; ret = rtl8xxxu_load_firmware(priv, fw_name); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 8168a35c314c..0d4ff80f0ab1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -625,6 +625,7 @@ struct rtl8xxxu_priv { u8 mac_addr[ETH_ALEN]; char chip_name[8]; + char chip_vendor[8]; u8 cck_tx_power_index_A[3]; /* 0x10 */ u8 cck_tx_power_index_B[3]; u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ @@ -647,6 +648,7 @@ struct rtl8xxxu_priv { u32 has_gps:1; u32 hi_pa:1; u32 vendor_umc:1; + u32 vendor_smic:1; u32 has_polarity_ctrl:1; u32 has_eeprom:1; u32 boot_eeprom:1; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 8f6c9c6c7c09..a934c0de197b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -222,6 +222,10 @@ #define SYS_CFG_CHIP_VER (BIT(12) | BIT(13) | BIT(14) | BIT(15)) #define SYS_CFG_BT_FUNC BIT(16) #define SYS_CFG_VENDOR_ID BIT(19) +#define SYS_CFG_VENDOR_EXT_MASK (BIT(18) | BIT(19)) +#define SYS_CFG_VENDOR_ID_TSMC 0 +#define SYS_CFG_VENDOR_ID_SMIC BIT(18) +#define SYS_CFG_VENDOR_ID_UMC BIT(19) #define SYS_CFG_PAD_HWPD_IDN BIT(22) #define SYS_CFG_TRP_VAUX_EN BIT(23) #define SYS_CFG_TRP_BT_EN BIT(24) -- cgit v1.2.3 From 4a82ffe3f8040c5b3dd50888e00077d1cc8cbf29 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:01 -0500 Subject: rtl8xxxu: Use 1024 byte block loads for 8192eu firmware The rtl8192eu can handle 1024 byte block writes, unlike it's predecessors (8192cu/8188cu). 
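writeN_block_size is the largest chunk the bulk-write helper may push in one transfer, so raising it to 1024 simply means the firmware download loop needs fewer USB transactions. A minimal sketch of such a chunking loop, using a hypothetical demo_writeN() accessor (the real download path also has to manage the write address and page registers, which is omitted here):

#include <linux/kernel.h>
#include <linux/types.h>

struct demo_priv;

/* Stand-in for the driver's N-byte bulk register write. */
static int demo_writeN(struct demo_priv *priv, u16 addr,
		       const u8 *buf, u16 len);

static int demo_download_fw(struct demo_priv *priv, u16 addr,
			    const u8 *fw, size_t size, u16 block_size)
{
	size_t done = 0;
	int ret;

	while (done < size) {
		u16 chunk = min_t(size_t, block_size, size - done);

		ret = demo_writeN(priv, addr, fw + done, chunk);
		if (ret)
			return ret;
		done += chunk;
	}
	return 0;
}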
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 745942299f41..35c0625141f1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5975,7 +5975,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_efuse = rtl8192eu_parse_efuse, .load_firmware = rtl8192eu_load_firmware, .power_on = rtl8192cu_power_on, - .writeN_block_size = 128, + .writeN_block_size = 1024, }; static struct usb_device_id dev_table[] = { -- cgit v1.2.3 From b001e086c4c8a5e60a2a28d40ded47ab124458f6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:02 -0500 Subject: rtl8xxxu: Add rtl8192eu_nic.bin to the MODULE_FIRMWARE list Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 35c0625141f1..e48815f25c8a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -54,6 +54,7 @@ MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8192eu_nic.bin"); module_param_named(debug, rtl8xxxu_debug, int, 0600); MODULE_PARM_DESC(debug, "Set debug mask"); -- cgit v1.2.3 From c05a9dbfb2b1f30b13126b0901fe4f1bdaf37406 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:03 -0500 Subject: rtl8xxxu: Implment rtl8192eu_power_on() This implements the rtl8192eu power on sequence, and splits it off from the rtl8192cu/rtl8723au power on sequence. 
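Most of the power-on sequence is read-modify-write of APS_FSMCO bits followed by a bounded poll for the hardware to acknowledge. A generic sketch of that "set a bit, wait for ready" step, with hypothetical 32-bit register accessors standing in for the driver's own:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_REG_POLL	500

struct demo_priv;
static u32 demo_read32(struct demo_priv *priv, u16 reg);
static void demo_write32(struct demo_priv *priv, u16 reg, u32 val);

/* Set @set_bit in @reg, then poll until @ready_bit is reported. */
static int demo_set_and_poll(struct demo_priv *priv, u16 reg,
			     u32 set_bit, u32 ready_bit)
{
	int count;

	demo_write32(priv, reg, demo_read32(priv, reg) | set_bit);

	for (count = DEMO_MAX_REG_POLL; count; count--) {
		if (demo_read32(priv, reg) & ready_bit)
			return 0;
		udelay(10);
	}
	return -EBUSY;		/* hardware never reported ready */
}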
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 133 ++++++++++++++++++++- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 6 + 2 files changed, 133 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e48815f25c8a..55d12fbb0345 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3791,7 +3791,7 @@ exit: return ret; } -static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv) +static void rtl8723a_disabled_to_emu(struct rtl8xxxu_priv *priv) { u8 val8; @@ -3811,7 +3811,82 @@ static void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); } -static int rtl8xxxu_emu_to_active(struct rtl8xxxu_priv *priv) +static void rtl8192e_disabled_to_emu(struct rtl8xxxu_priv *priv) +{ + u8 val8; + + /* Clear suspend enable and power down enable*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~(BIT(3) | BIT(4)); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); +} + +static int rtl8192e_emu_to_active(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u32 val32; + int count, ret = 0; + + /* disable HWPDN 0x04[15]=0*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~BIT(7); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* disable SW LPS 0x04[10]= 0 */ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~BIT(2); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* disable WL suspend*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 &= ~(BIT(3) | BIT(4)); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + /* wait till 0x04[17] = 1 power ready*/ + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + if (val32 & BIT(17)) + break; + + udelay(10); + } + + if (!count) { + ret = -EBUSY; + goto exit; + } + + /* We should be able to optimize the following three entries into one */ + + /* release WLON reset 0x04[16]= 1*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8); + + /* set, then poll until 0 */ + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + val32 |= APS_FSMCO_MAC_ENABLE; + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) { + ret = 0; + break; + } + udelay(10); + } + + if (!count) { + ret = -EBUSY; + goto exit; + } + +exit: + return ret; +} + +static int rtl8723a_emu_to_active(struct rtl8xxxu_priv *priv) { u8 val8; u32 val32; @@ -3940,9 +4015,9 @@ static int rtl8723au_power_on(struct rtl8xxxu_priv *priv) */ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0); - rtl8xxxu_disabled_to_emu(priv); + rtl8723a_disabled_to_emu(priv); - ret = rtl8xxxu_emu_to_active(priv); + ret = rtl8723a_emu_to_active(priv); if (ret) goto exit; @@ -4081,6 +4156,52 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) #endif +static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv) +{ + u16 val16; + u32 val32; + int ret; + + ret = 0; + + val32 = rtl8xxxu_read32(priv, REG_SYS_CFG); + if (val32 & SYS_CFG_SPS_LDO_SEL) { + rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3); + } else { + /* + * Raise 1.2V voltage + */ + val32 = rtl8xxxu_read32(priv, REG_8192E_LDOV12_CTRL); + val32 &= 0xff0fffff; + val32 |= 0x00500000; + 
rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32); + rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83); + } + + rtl8192e_disabled_to_emu(priv); + + ret = rtl8192e_emu_to_active(priv); + if (ret) + goto exit; + + rtl8xxxu_write16(priv, REG_CR, 0x0000); + + /* + * Enable MAC DMA/WMAC/SCHEDULE/SEC block + * Set CR bit10 to enable 32k calibration. + */ + val16 = rtl8xxxu_read16(priv, REG_CR); + val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | + CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | + CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | + CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | + CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE); + rtl8xxxu_write16(priv, REG_CR, val16); + +exit: + return ret; +} + static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) { u8 val8; @@ -5975,8 +6096,8 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_efuse = rtl8192eu_parse_efuse, .load_firmware = rtl8192eu_load_firmware, - .power_on = rtl8192cu_power_on, - .writeN_block_size = 1024, + .power_on = rtl8192eu_power_on, + .writeN_block_size = 128, }; static struct usb_device_id dev_table[] = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index a934c0de197b..02c7d6c6f62d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -72,6 +72,7 @@ #define REG_AFE_MISC 0x0010 #define REG_SPS0_CTRL 0x0011 #define REG_SPS_OCP_CFG 0x0018 +#define REG_8192E_LDOV12_CTRL 0x0014 #define REG_RSV_CTRL 0x001c #define REG_RF_CTRL 0x001f @@ -178,6 +179,8 @@ control */ #define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */ +#define REG_LDO_SW_CTRL 0x007c /* 8192eu */ + #define REG_MCU_FW_DL 0x0080 #define MCU_FW_DL_ENABLE BIT(0) #define MCU_FW_DL_READY BIT(1) @@ -229,6 +232,7 @@ #define SYS_CFG_PAD_HWPD_IDN BIT(22) #define SYS_CFG_TRP_VAUX_EN BIT(23) #define SYS_CFG_TRP_BT_EN BIT(24) +#define SYS_CFG_SPS_LDO_SEL BIT(24) /* 8192eu */ #define SYS_CFG_BD_PKG_SEL BIT(25) #define SYS_CFG_BD_HCI_SEL BIT(26) #define SYS_CFG_TYPE_ID BIT(27) @@ -261,6 +265,8 @@ #define GPIO_USB_SUSEN BIT(23) #define GPIO_RF_RL_ID (BIT(31) | BIT(30) | BIT(29) | BIT(28)) +#define REG_SYS_CFG2 0x00fc /* 8192eu */ + /* 0x0100 ~ 0x01FF MACTOP General Configuration */ #define REG_CR 0x0100 #define CR_HCI_TXDMA_ENABLE BIT(0) -- cgit v1.2.3 From 74b99bed874f5a17336515b63f776a8cbbbceb93 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:04 -0500 Subject: rtl8xxxu: Add rtl8xxxu_auto_llt_table() Newer chips can auto load the LLT table, it is no longer necessary to build it manually in the driver. 
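With auto-load, LLT setup collapses to a single kick: set AUTO_LLT_INIT_LLT and poll for the hardware to clear it again. Note the timeout convention in the countdown loop below -- running the counter all the way to zero means failure, which is exactly the check the initial version of this patch gets backwards (it is corrected by a later patch in this series). A hedged sketch with hypothetical accessors:

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_AUTO_LLT_RETRIES	500
#define DEMO_INIT_LLT		BIT(16)

struct demo_priv;
static u32 demo_read32(struct demo_priv *priv, u16 reg);
static void demo_write32(struct demo_priv *priv, u16 reg, u32 val);

static int demo_auto_llt_init(struct demo_priv *priv, u16 auto_llt_reg)
{
	int i;

	/* Kick the hardware; it clears the bit once the LLT is loaded. */
	demo_write32(priv, auto_llt_reg,
		     demo_read32(priv, auto_llt_reg) | DEMO_INIT_LLT);

	for (i = DEMO_AUTO_LLT_RETRIES; i; i--) {
		if (!(demo_read32(priv, auto_llt_reg) & DEMO_INIT_LLT))
			return 0;
		usleep_range(2, 4);
	}

	return -EBUSY;	/* i hit zero: the bit never cleared */
}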
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 33 +++++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 3 ++ 3 files changed, 36 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 55d12fbb0345..b2e4eac4d14f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2774,6 +2774,34 @@ exit: return ret; } +static int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page) +{ + u32 val32; + int ret = 0; + int i; + + val32 = rtl8xxxu_read32(priv, REG_AUTO_LLT); + pr_info("AUTO_LLT = %08x\n", val32); + val32 |= AUTO_LLT_INIT_LLT; + rtl8xxxu_write32(priv, REG_AUTO_LLT, val32); + + for (i = 500; i; i--) { + val32 = rtl8xxxu_read32(priv, REG_AUTO_LLT); + if (!(val32 & AUTO_LLT_INIT_LLT)) + break; + usleep_range(2, 4); + } + + if (i) { + ret = -EBUSY; + dev_warn(&priv->udev->dev, "LLT table init failed\n"); + } + else + dev_warn(&priv->udev->dev, "LLT table init success\n"); + + return ret; +} + static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv) { u16 val16, hi, lo; @@ -4287,7 +4315,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); if (!macpower) { - ret = rtl8xxxu_init_llt_table(priv, TX_TOTAL_PAGE_NUM); + ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM); if (ret) { dev_warn(dev, "%s: LLT table init failed\n", __func__); goto exit; @@ -6079,6 +6107,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .parse_efuse = rtl8723au_parse_efuse, .load_firmware = rtl8723au_load_firmware, .power_on = rtl8723au_power_on, + .llt_init = rtl8xxxu_init_llt_table, .writeN_block_size = 1024, }; @@ -6088,6 +6117,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .parse_efuse = rtl8192cu_parse_efuse, .load_firmware = rtl8192cu_load_firmware, .power_on = rtl8192cu_power_on, + .llt_init = rtl8xxxu_init_llt_table, .writeN_block_size = 128, }; @@ -6097,6 +6127,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_efuse = rtl8192eu_parse_efuse, .load_firmware = rtl8192eu_load_firmware, .power_on = rtl8192eu_power_on, + .llt_init = rtl8xxxu_auto_llt_table, .writeN_block_size = 128, }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 0d4ff80f0ab1..56c4e0a7c07d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -718,5 +718,6 @@ struct rtl8xxxu_fileops { int (*parse_efuse) (struct rtl8xxxu_priv *priv); int (*load_firmware) (struct rtl8xxxu_priv *priv); int (*power_on) (struct rtl8xxxu_priv *priv); + int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); int writeN_block_size; }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 02c7d6c6f62d..b83bd34bdd76 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -372,6 +372,9 @@ #define RQPN_NPQ_SHIFT 0 #define RQPN_EPQ_SHIFT 16 +#define REG_AUTO_LLT 0x0224 +#define AUTO_LLT_INIT_LLT BIT(16) + /* 0x0280 ~ 0x02FF RXDMA Configuration */ #define REG_RXDMA_AGG_PG_TH 0x0280 #define REG_RXPKT_NUM 0x0284 -- cgit v1.2.3 From 07bb46be5306d27c8d8ce7b287688554b9ca1569 
Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:05 -0500 Subject: rtl8xxxu: Init page boundaries before starting the firmware This reorganizes the device initialization to init page boundaries before starting the firmware. This matches the flow in the 8192eu vendor driver. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 68 ++++++++++++------------ 1 file changed, 34 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index b2e4eac4d14f..65994e938a17 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4313,6 +4313,40 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) goto exit; } + dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); + if (!macpower) { + if (priv->ep_tx_normal_queue) + val8 = TX_PAGE_NUM_NORM_PQ; + else + val8 = 0; + + rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); + + val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; + + if (priv->ep_tx_high_queue) + val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); + if (priv->ep_tx_low_queue) + val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); + + rtl8xxxu_write32(priv, REG_RQPN, val32); + + /* + * Set TX buffer boundary + */ + val8 = TX_TOTAL_PAGE_NUM + 1; + rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); + rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8); + rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8); + rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8); + rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); + } + + ret = rtl8xxxu_init_queue_priority(priv); + dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); + if (ret) + goto exit; + dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); if (!macpower) { ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM); @@ -4392,40 +4426,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); - dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); - if (!macpower) { - if (priv->ep_tx_normal_queue) - val8 = TX_PAGE_NUM_NORM_PQ; - else - val8 = 0; - - rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); - - val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; - - if (priv->ep_tx_high_queue) - val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); - if (priv->ep_tx_low_queue) - val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); - - rtl8xxxu_write32(priv, REG_RQPN, val32); - - /* - * Set TX buffer boundary - */ - val8 = TX_TOTAL_PAGE_NUM + 1; - rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); - rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8); - rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8); - rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8); - rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); - } - - ret = rtl8xxxu_init_queue_priority(priv); - dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); - if (ret) - goto exit; - /* * Set RX page boundary */ -- cgit v1.2.3 From a47b9d477c792f8c0c9b6419a25528566260916a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:06 -0500 Subject: rtl8xxxu: Init the LLT after we start the firmware To match the flow of the vendor driver, move the LLT init to after the firmware is started. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 65994e938a17..ad5b15cb07b6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4342,6 +4342,15 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); } + ret = rtl8xxxu_download_firmware(priv); + dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret); + if (ret) + goto exit; + ret = rtl8xxxu_start_firmware(priv); + dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret); + if (ret) + goto exit; + ret = rtl8xxxu_init_queue_priority(priv); dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); if (ret) @@ -4356,15 +4365,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) } } - ret = rtl8xxxu_download_firmware(priv); - dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret); - if (ret) - goto exit; - ret = rtl8xxxu_start_firmware(priv); - dev_dbg(dev, "%s: start_fiwmare %i\n", __func__, ret); - if (ret) - goto exit; - ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); dev_dbg(dev, "%s: init_mac %i\n", __func__, ret); if (ret) -- cgit v1.2.3 From 4de24819192eb507525fa24182c86c2073accd12 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:07 -0500 Subject: rtl8xxxu: Fix incorrect test for auto LLT failure The logic for testing auto load failure in rtl8xxxu_auto_llt_table() was inverted. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ad5b15cb07b6..68d6bb89f719 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2781,7 +2781,6 @@ static int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page) int i; val32 = rtl8xxxu_read32(priv, REG_AUTO_LLT); - pr_info("AUTO_LLT = %08x\n", val32); val32 |= AUTO_LLT_INIT_LLT; rtl8xxxu_write32(priv, REG_AUTO_LLT, val32); @@ -2792,12 +2791,10 @@ static int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page) usleep_range(2, 4); } - if (i) { + if (!i) { ret = -EBUSY; dev_warn(&priv->udev->dev, "LLT table init failed\n"); } - else - dev_warn(&priv->udev->dev, "LLT table init success\n"); return ret; } -- cgit v1.2.3 From b63d0aaca62be6eed15d3829174307ac2d608623 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:08 -0500 Subject: rtl8xxxu: Kludge to drop incorrect USB OUT EP for 8192EU The 8192eu (and some other parts) will report an incorrect USB OUT EP. This tells the chip to drop it - as per the vendor driver. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 ++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 2 files changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 68d6bb89f719..b6d8014b8335 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4362,6 +4362,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) } } + if (priv->rtlchip == 0x8192e) { + val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); + val32 |= TXDMA_OFFSET_DROP_DATA_EN; + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); + } + ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); dev_dbg(dev, "%s: init_mac %i\n", __func__, ret); if (ret) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index b83bd34bdd76..226a1fa7d8b9 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -367,6 +367,7 @@ #define REG_FIFOPAGE 0x0204 #define REG_TDECTRL 0x0208 #define REG_TXDMA_OFFSET_CHK 0x020c +#define TXDMA_OFFSET_DROP_DATA_EN BIT(9) #define REG_TXDMA_STATUS 0x0210 #define REG_RQPN_NPQ 0x0214 #define RQPN_NPQ_SHIFT 0 -- cgit v1.2.3 From 99ad16cbea15a6e61acca5662eff8d195cc6fbf6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:09 -0500 Subject: rtl8xxxu: Init REG_HIMR[01] for 8192eu parts The newer generation chips have different interrupt registers. Initialize this correct registers on 8192eu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 3 +++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 4 ++++ 2 files changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index b6d8014b8335..e4d81277a3b0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4366,6 +4366,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); val32 |= TXDMA_OFFSET_DROP_DATA_EN; rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); + + rtl8xxxu_write32(priv, REG_HIMR0, 0x00); + rtl8xxxu_write32(priv, REG_HIMR1, 0x00); } ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 226a1fa7d8b9..3cbb54956e79 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -196,6 +196,10 @@ #define REG_HMBOX_EXT_1 0x008a #define REG_HMBOX_EXT_2 0x008c #define REG_HMBOX_EXT_3 0x008e +/* Interrupt registers for 8192e/8812 */ +#define REG_HIMR0 0x00b0 +#define REG_HIMR1 0x00b8 + /* Host suspend counter on FPGA platform */ #define REG_HOST_SUSP_CNT 0x00bc /* Efuse access protection for RTL8723 */ -- cgit v1.2.3 From 35a741febfae3cfc2a27d3b4935e255585ecfd81 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:10 -0500 Subject: rtl8xxxu: Initial rtl8723bu chip identification This provides initial detection of 8723bu devices, and selects the correct firmware image to load. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 40 ++++++++++++++++++++++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 +- 2 files changed, 38 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e4d81277a3b0..729f6d2d9d97 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -55,6 +55,8 @@ MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin"); MODULE_FIRMWARE("rtlwifi/rtl8192eu_nic.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723bu_nic.bin"); +MODULE_FIRMWARE("rtlwifi/rtl8723bu_bt.bin"); module_param_named(debug, rtl8xxxu_debug, int, 0600); MODULE_PARM_DESC(debug, "Set debug mask"); @@ -1700,11 +1702,17 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } if (val32 & SYS_CFG_BT_FUNC) { - sprintf(priv->chip_name, "8723AU"); + if (priv->chip_cut >= 3) { + sprintf(priv->chip_name, "8723BU"); + priv->rtlchip = 0x8723b; + } else { + sprintf(priv->chip_name, "8723AU"); + priv->rtlchip = 0x8723a; + } + priv->rf_paths = 1; priv->rx_paths = 1; priv->tx_paths = 1; - priv->rtlchip = 0x8723a; val32 = rtl8xxxu_read32(priv, REG_MULTI_FUNC_CTRL); if (val32 & MULTI_WIFI_FUNC_EN) @@ -1807,6 +1815,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) */ if (!priv->ep_tx_count) { switch (priv->nr_out_eps) { + case 4: case 3: priv->ep_tx_low_queue = 1; priv->ep_tx_count++; @@ -2321,6 +2330,7 @@ static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name) case 0x92e0: case 0x92c0: case 0x88c0: + case 0x5300: case 0x2300: break; default: @@ -2362,6 +2372,20 @@ static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv) return ret; } +static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv) +{ + char *fw_name; + int ret; + + if (priv->enable_bluetooth) + fw_name = "rtlwifi/rtl8723bu_bt.bin"; + else + fw_name = "rtlwifi/rtl8723bu_nic.bin"; + + ret = rtl8xxxu_load_firmware(priv, fw_name); + return ret; +} + #ifdef CONFIG_RTL8XXXU_UNTESTED static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv) @@ -2587,7 +2611,7 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) else rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); - if (priv->rtlchip == 0x8723a && + if ((priv->rtlchip == 0x8723a || priv->rtlchip == 0x8723b) && priv->efuse_wifi.efuse8723.version >= 0x01) { val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL); @@ -6117,6 +6141,14 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .writeN_block_size = 1024, }; +static struct rtl8xxxu_fileops rtl8723bu_fops = { + .parse_efuse = rtl8723au_parse_efuse, + .load_firmware = rtl8723bu_load_firmware, + .power_on = rtl8723au_power_on, + .llt_init = rtl8xxxu_auto_llt_table, + .writeN_block_size = 128, +}; + #ifdef CONFIG_RTL8XXXU_UNTESTED static struct rtl8xxxu_fileops rtl8192cu_fops = { @@ -6146,6 +6178,8 @@ static struct usb_device_id dev_table[] = { .driver_info = (unsigned long)&rtl8723au_fops}, {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x818b, 0xff, 0xff, 0xff), .driver_info = (unsigned long)&rtl8192eu_fops}, +{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0xb720, 0xff, 0xff, 0xff), + .driver_info = (unsigned long)&rtl8723bu_fops}, #ifdef CONFIG_RTL8XXXU_UNTESTED /* Still supported by rtlwifi */ {USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 
0x8176, 0xff, 0xff, 0xff), diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 56c4e0a7c07d..dd4c56379d50 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -34,7 +34,7 @@ #define RTL8XXXU_MAX_REG_POLL 500 #define USB_INTR_CONTENT_LENGTH 56 -#define RTL8XXXU_OUT_ENDPOINTS 3 +#define RTL8XXXU_OUT_ENDPOINTS 4 #define REALTEK_USB_READ 0xc0 #define REALTEK_USB_WRITE 0x40 -- cgit v1.2.3 From 3c836d6014e5057fc5845829ae762190008bded4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:11 -0500 Subject: rtl8xxxu: Add rtl8723bu_parse_efuse() and 8723bu efuse definition Implement first stab at parsing the 8723bu's efuse. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 47 +++++++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 42 +++++++++++++++++++++ 2 files changed, 88 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 729f6d2d9d97..255300c912d4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1877,6 +1877,51 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) return 0; } +static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) +{ + if (priv->efuse_wifi.efuse8723bu.rtl_id != cpu_to_le16(0x8129)) + return -EINVAL; + + ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723bu.mac_addr); + + memcpy(priv->cck_tx_power_index_A, + priv->efuse_wifi.efuse8723bu.cck_tx_power_index_A, + sizeof(priv->cck_tx_power_index_A)); + memcpy(priv->cck_tx_power_index_B, + priv->efuse_wifi.efuse8723bu.cck_tx_power_index_B, + sizeof(priv->cck_tx_power_index_B)); + + memcpy(priv->ht40_1s_tx_power_index_A, + priv->efuse_wifi.efuse8723bu.ht40_1s_tx_power_index_A, + sizeof(priv->ht40_1s_tx_power_index_A)); + memcpy(priv->ht40_1s_tx_power_index_B, + priv->efuse_wifi.efuse8723bu.ht40_1s_tx_power_index_B, + sizeof(priv->ht40_1s_tx_power_index_B)); + + dev_info(&priv->udev->dev, "Vendor: %.7s\n", + priv->efuse_wifi.efuse8723bu.vendor_name); + dev_info(&priv->udev->dev, "Product: %.41s\n", + priv->efuse_wifi.efuse8723bu.device_name); + + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) { + int i; + unsigned char *raw = priv->efuse_wifi.raw; + + dev_info(&priv->udev->dev, + "%s: dumping efuse (0x%02zx bytes):\n", + __func__, sizeof(struct rtl8723bu_efuse)); + for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) { + dev_info(&priv->udev->dev, "%02x: " + "%02x %02x %02x %02x %02x %02x %02x %02x\n", i, + raw[i], raw[i + 1], raw[i + 2], + raw[i + 3], raw[i + 4], raw[i + 5], + raw[i + 6], raw[i + 7]); + } + } + + return 0; +} + #ifdef CONFIG_RTL8XXXU_UNTESTED static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) @@ -6142,7 +6187,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { }; static struct rtl8xxxu_fileops rtl8723bu_fops = { - .parse_efuse = rtl8723au_parse_efuse, + .parse_efuse = rtl8723bu_parse_efuse, .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723au_power_on, .llt_init = rtl8xxxu_auto_llt_table, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index dd4c56379d50..bf424db90901 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -500,6 
+500,47 @@ struct rtl8192cu_efuse { u8 customer_id; }; +struct rtl8723bu_efuse { + __le16 rtl_id; + u8 res0[0x0e]; + u8 cck_tx_power_index_A[3]; /* 0x10 */ + u8 cck_tx_power_index_B[3]; + u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ + u8 ht40_1s_tx_power_index_B[3]; + u8 res1[0x9c]; + u8 channel_plan; /* 0xb8 */ + u8 xtal_k; + u8 thermal_meter; + u8 iqk_lck; + u8 pa_type; /* 0xbc */ + u8 lna_type_2g; /* 0xbd */ + u8 res2[3]; + u8 rf_board_option; + u8 rf_feature_option; + u8 rf_bt_setting; + u8 eeprom_version; + u8 eeprom_customer_id; + u8 res3[2]; + u8 tx_pwr_calibrate_rate; + u8 rf_antenna_option; /* 0xc9 */ + u8 rfe_option; + u8 res4[9]; + u8 usb_optional_function; + u8 res5[0x1e]; + u8 res6[2]; + u8 serial[0x0b]; /* 0xf5 */ + u8 vid; /* 0x100 */ + u8 res7; + u8 pid; + u8 res8[4]; + u8 mac_addr[ETH_ALEN]; /* 0x107 */ + u8 res9[2]; + u8 vendor_name[0x07]; + u8 res10[2]; + u8 device_name[0x14]; /* 0xe8 */ + u8 res11[0xd4]; +}; + struct rtl8192eu_efuse { __le16 rtl_id; u8 res0[0x0e]; @@ -690,6 +731,7 @@ struct rtl8xxxu_priv { union { u8 raw[EFUSE_MAP_LEN]; struct rtl8723au_efuse efuse8723; + struct rtl8723bu_efuse efuse8723bu; struct rtl8192cu_efuse efuse8192; struct rtl8192eu_efuse efuse8192eu; } efuse_wifi; -- cgit v1.2.3 From adfc01243f0fc891c630f27ef45320fea71bb98e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:12 -0500 Subject: rtl8xxxu: Use 1024 byte writes for writing 8723bu firmware The 8723bu, like the 8192eu, can also handle 1024 byte block writes. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 255300c912d4..93f6b714d98b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6191,7 +6191,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723au_power_on, .llt_init = rtl8xxxu_auto_llt_table, - .writeN_block_size = 128, + .writeN_block_size = 1024, }; #ifdef CONFIG_RTL8XXXU_UNTESTED -- cgit v1.2.3 From 0e28b9753a9cf462a535c56e2d31ee17521a4b73 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:13 -0500 Subject: rtl8xxxu: Only setup USB interrupts for parts which support it Only 1st generation chips do provide USB interrupts, so do not try to setup interrupts for newer chips (8192eu and 8723bu). 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 18 +++++++++++++----- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 93f6b714d98b..467c0bcaadb1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1707,6 +1707,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rtlchip = 0x8723b; } else { sprintf(priv->chip_name, "8723AU"); + priv->usb_interrupts = 1; priv->rtlchip = 0x8723a; } @@ -1744,12 +1745,14 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 1; + priv->usb_interrupts = 1; priv->rtlchip = 0x8191c; } else { sprintf(priv->chip_name, "8192CU"); priv->rf_paths = 2; priv->rx_paths = 2; priv->tx_paths = 2; + priv->usb_interrupts = 1; priv->rtlchip = 0x8192c; } priv->has_wifi = 1; @@ -1759,6 +1762,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) priv->rx_paths = 1; priv->tx_paths = 1; priv->rtlchip = 0x8188c; + priv->usb_interrupts = 1; priv->has_wifi = 1; } @@ -5825,9 +5829,11 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) init_usb_anchor(&priv->int_anchor); rtl8723a_enable_rf(priv); - ret = rtl8xxxu_submit_int_urb(hw); - if (ret) - goto exit; + if (priv->usb_interrupts) { + ret = rtl8xxxu_submit_int_urb(hw); + if (ret) + goto exit; + } for (i = 0; i < RTL8XXXU_TX_URBS; i++) { tx_urb = kmalloc(sizeof(struct rtl8xxxu_tx_urb), GFP_KERNEL); @@ -5902,14 +5908,16 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw) usb_kill_anchored_urbs(&priv->rx_anchor); usb_kill_anchored_urbs(&priv->tx_anchor); - usb_kill_anchored_urbs(&priv->int_anchor); + if (priv->usb_interrupts) + usb_kill_anchored_urbs(&priv->int_anchor); rtl8723a_disable_rf(priv); /* * Disable interrupts */ - rtl8xxxu_write32(priv, REG_USB_HIMR, 0); + if (priv->usb_interrupts) + rtl8xxxu_write32(priv, REG_USB_HIMR, 0); rtl8xxxu_free_rx_resources(priv); rtl8xxxu_free_tx_resources(priv); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index bf424db90901..205504848558 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -693,6 +693,7 @@ struct rtl8xxxu_priv { u32 has_polarity_ctrl:1; u32 has_eeprom:1; u32 boot_eeprom:1; + u32 usb_interrupts:1; u32 ep_tx_high_queue:1; u32 ep_tx_normal_queue:1; u32 ep_tx_low_queue:1; -- cgit v1.2.3 From 36c32588c83df156e80e485e48a827cd57589620 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:14 -0500 Subject: rtl8xxxu: Add rtl8723b_phy_1t_init_table This adds the 8723bu PHY 1T init table. 
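These init tables are plain {register, value} pairs terminated by a {0xffff, 0xffffffff} sentinel; the driver just walks the table and writes each entry, so bringing up a new chip is largely a matter of supplying another table. A minimal sketch of the apply loop (hypothetical accessor; the driver's actual rtl8xxxu_init_phy_regs may differ in detail):

#include <linux/types.h>

struct demo_reg32val {
	u16 reg;
	u32 val;
};

struct demo_priv;
static void demo_write32(struct demo_priv *priv, u16 reg, u32 val);

/* Walk an init table until the {0xffff, 0xffffffff} sentinel entry. */
static void demo_init_phy_regs(struct demo_priv *priv,
			       const struct demo_reg32val *table)
{
	for (; table->reg != 0xffff; table++)
		demo_write32(priv, table->reg, table->val);
}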
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 103 +++++++++++++++++++++++ 1 file changed, 103 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 467c0bcaadb1..cc91247f7341 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -251,6 +251,107 @@ static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = { {0xffff, 0xffffffff}, }; +static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = { + {0x800, 0x80040000}, {0x804, 0x00000003}, + {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, + {0x810, 0x10001331}, {0x814, 0x020c3d10}, + {0x818, 0x02200385}, {0x81c, 0x00000000}, + {0x820, 0x01000100}, {0x824, 0x00190204}, + {0x828, 0x00000000}, {0x82c, 0x00000000}, + {0x830, 0x00000000}, {0x834, 0x00000000}, + {0x838, 0x00000000}, {0x83c, 0x00000000}, + {0x840, 0x00010000}, {0x844, 0x00000000}, + {0x848, 0x00000000}, {0x84c, 0x00000000}, + {0x850, 0x00000000}, {0x854, 0x00000000}, + {0x858, 0x569a11a9}, {0x85c, 0x01000014}, + {0x860, 0x66f60110}, {0x864, 0x061f0649}, + {0x868, 0x00000000}, {0x86c, 0x27272700}, + {0x870, 0x07000760}, {0x874, 0x25004000}, + {0x878, 0x00000808}, {0x87c, 0x00000000}, + {0x880, 0xb0000c1c}, {0x884, 0x00000001}, + {0x888, 0x00000000}, {0x88c, 0xccc000c0}, + {0x890, 0x00000800}, {0x894, 0xfffffffe}, + {0x898, 0x40302010}, {0x89c, 0x00706050}, + {0x900, 0x00000000}, {0x904, 0x00000023}, + {0x908, 0x00000000}, {0x90c, 0x81121111}, + {0x910, 0x00000002}, {0x914, 0x00000201}, + {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c}, + {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f}, + {0xa10, 0x9500bb78}, {0xa14, 0x1114d028}, + {0xa18, 0x00881117}, {0xa1c, 0x89140f00}, + {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317}, + {0xa28, 0x00000204}, {0xa2c, 0x00d30000}, + {0xa70, 0x101fbf00}, {0xa74, 0x00000007}, + {0xa78, 0x00000900}, {0xa7c, 0x225b0606}, + {0xa80, 0x21806490}, {0xb2c, 0x00000000}, + {0xc00, 0x48071d40}, {0xc04, 0x03a05611}, + {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c}, + {0xc10, 0x08800000}, {0xc14, 0x40000100}, + {0xc18, 0x08800000}, {0xc1c, 0x40000100}, + {0xc20, 0x00000000}, {0xc24, 0x00000000}, + {0xc28, 0x00000000}, {0xc2c, 0x00000000}, + {0xc30, 0x69e9ac44}, {0xc34, 0x469652af}, + {0xc38, 0x49795994}, {0xc3c, 0x0a97971c}, + {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7}, + {0xc48, 0xec020107}, {0xc4c, 0x007f037f}, + {0xc50, 0x69553420}, {0xc54, 0x43bc0094}, + {0xc58, 0x00013149}, {0xc5c, 0x00250492}, + {0xc60, 0x00000000}, {0xc64, 0x7112848b}, + {0xc68, 0x47c00bff}, {0xc6c, 0x00000036}, + {0xc70, 0x2c7f000d}, {0xc74, 0x020610db}, + {0xc78, 0x0000001f}, {0xc7c, 0x00b91612}, + {0xc80, 0x390000e4}, {0xc84, 0x20f60000}, + {0xc88, 0x40000100}, {0xc8c, 0x20200000}, + {0xc90, 0x00020e1a}, {0xc94, 0x00000000}, + {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f}, + {0xca0, 0x00000000}, {0xca4, 0x000300a0}, + {0xca8, 0x00000000}, {0xcac, 0x00000000}, + {0xcb0, 0x00000000}, {0xcb4, 0x00000000}, + {0xcb8, 0x00000000}, {0xcbc, 0x28000000}, + {0xcc0, 0x00000000}, {0xcc4, 0x00000000}, + {0xcc8, 0x00000000}, {0xccc, 0x00000000}, + {0xcd0, 0x00000000}, {0xcd4, 0x00000000}, + {0xcd8, 0x64b22427}, {0xcdc, 0x00766932}, + {0xce0, 0x00222222}, {0xce4, 0x00000000}, + {0xce8, 0x37644302}, {0xcec, 0x2f97d40c}, + {0xd00, 0x00000740}, {0xd04, 0x40020401}, + {0xd08, 0x0000907f}, {0xd0c, 0x20010201}, + {0xd10, 0xa0633333}, {0xd14, 0x3333bc53}, + {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975}, + {0xd30, 
0x00000000}, {0xd34, 0x80608000}, + {0xd38, 0x00000000}, {0xd3c, 0x00127353}, + {0xd40, 0x00000000}, {0xd44, 0x00000000}, + {0xd48, 0x00000000}, {0xd4c, 0x00000000}, + {0xd50, 0x6437140a}, {0xd54, 0x00000000}, + {0xd58, 0x00000282}, {0xd5c, 0x30032064}, + {0xd60, 0x4653de68}, {0xd64, 0x04518a3c}, + {0xd68, 0x00002101}, {0xd6c, 0x2a201c16}, + {0xd70, 0x1812362e}, {0xd74, 0x322c2220}, + {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d}, + {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d}, + {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d}, + {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d}, + {0xe28, 0x00000000}, {0xe30, 0x1000dc1f}, + {0xe34, 0x10008c1f}, {0xe38, 0x02140102}, + {0xe3c, 0x681604c2}, {0xe40, 0x01007c00}, + {0xe44, 0x01004800}, {0xe48, 0xfb000000}, + {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f}, + {0xe54, 0x10008c1f}, {0xe58, 0x02140102}, + {0xe5c, 0x28160d05}, {0xe60, 0x00000008}, + {0xe68, 0x001b2556}, {0xe6c, 0x00c00096}, + {0xe70, 0x00c00096}, {0xe74, 0x01000056}, + {0xe78, 0x01000014}, {0xe7c, 0x01000056}, + {0xe80, 0x01000014}, {0xe84, 0x00c00096}, + {0xe88, 0x01000056}, {0xe8c, 0x00c00096}, + {0xed0, 0x00c00096}, {0xed4, 0x00c00096}, + {0xed8, 0x00c00096}, {0xedc, 0x000000d6}, + {0xee0, 0x000000d6}, {0xeec, 0x01c00016}, + {0xf14, 0x00000003}, {0xf4c, 0x00000000}, + {0xf00, 0x00000300}, + {0x820, 0x01000100}, {0x800, 0x83040000}, + {0xffff, 0xffffffff}, +}; + static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = { {0x024, 0x0011800f}, {0x028, 0x00ffdb83}, {0x800, 0x80040002}, {0x804, 0x00000003}, @@ -2589,6 +2690,8 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); else if (priv->tx_paths == 2) rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); + else if (priv->rtlchip == 0x8723b) + rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); else rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); -- cgit v1.2.3 From 22a31d455ce20737e647f8c9cc37ac9080deee96 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:15 -0500 Subject: rtl8xxxu: Add rtl8723bu_radioa_1t_init_table Add 8723bu 1T radio init table. The vendor driver indicates that some registers need special treatment for TFBGA90, TFBGA80, and TFBGA79 packaging. However the vendor driver never actually checks the package type, so just stick to default values here. 
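For illustration only, and not part of this patch: if one did want to honor the package type, the byte added to struct rtl8723bu_efuse at offset 0x1fb could drive the two affected RF registers. The helper name, and the assumption that a non-zero package_type marks one of the TFBGA packages, are both invented for this sketch; the driver simply keeps the table defaults.

	static void rtl8723bu_apply_package_fixup(struct rtl8xxxu_priv *priv)
	{
		u32 val32 = 0x0006b04e;		/* default value from the init table */

		if (priv->efuse_wifi.efuse8723bu.package_type)
			val32 |= BIT(8);	/* TFBGA90/80/79 per the vendor comment */

		rtl8xxxu_write_rfreg(priv, RF_A, 0x51, val32);
		rtl8xxxu_write_rfreg(priv, RF_A, 0x71, val32);
	}
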
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 78 ++++++++++++++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 6 +- 2 files changed, 82 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index cc91247f7341..7d62440ec893 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -792,6 +792,75 @@ static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = { {0xff, 0xffffffff} }; +static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = { + {0x00, 0x00010000}, {0xb0, 0x000dffe0}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0xb1, 0x00000018}, + {0xfe, 0x00000000}, {0xfe, 0x00000000}, + {0xfe, 0x00000000}, {0xb2, 0x00084c00}, + {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa}, + {0xb7, 0x00000010}, {0xb8, 0x0000907f}, + {0x5c, 0x00000002}, {0x7c, 0x00000002}, + {0x7e, 0x00000005}, {0x8b, 0x0006fc00}, + {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2}, + {0x1e, 0x00000000}, {0xdf, 0x00000780}, + {0x50, 0x00067435}, + /* + * The 8723bu vendor driver indicates that bit 8 should be set in + * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However + * they never actually check the package type - and just default + * to not setting it. + */ + {0x51, 0x0006b04e}, + {0x52, 0x000007d2}, {0x53, 0x00000000}, + {0x54, 0x00050400}, {0x55, 0x0004026e}, + {0xdd, 0x0000004c}, {0x70, 0x00067435}, + /* + * 0x71 has same package type condition as for register 0x51 + */ + {0x71, 0x0006b04e}, + {0x72, 0x000007d2}, {0x73, 0x00000000}, + {0x74, 0x00050400}, {0x75, 0x0004026e}, + {0xef, 0x00000100}, {0x34, 0x0000add7}, + {0x35, 0x00005c00}, {0x34, 0x00009dd4}, + {0x35, 0x00005000}, {0x34, 0x00008dd1}, + {0x35, 0x00004400}, {0x34, 0x00007dce}, + {0x35, 0x00003800}, {0x34, 0x00006cd1}, + {0x35, 0x00004400}, {0x34, 0x00005cce}, + {0x35, 0x00003800}, {0x34, 0x000048ce}, + {0x35, 0x00004400}, {0x34, 0x000034ce}, + {0x35, 0x00003800}, {0x34, 0x00002451}, + {0x35, 0x00004400}, {0x34, 0x0000144e}, + {0x35, 0x00003800}, {0x34, 0x00000051}, + {0x35, 0x00004400}, {0xef, 0x00000000}, + {0xef, 0x00000100}, {0xed, 0x00000010}, + {0x44, 0x0000add7}, {0x44, 0x00009dd4}, + {0x44, 0x00008dd1}, {0x44, 0x00007dce}, + {0x44, 0x00006cc1}, {0x44, 0x00005cce}, + {0x44, 0x000044d1}, {0x44, 0x000034ce}, + {0x44, 0x00002451}, {0x44, 0x0000144e}, + {0x44, 0x00000051}, {0xef, 0x00000000}, + {0xed, 0x00000000}, {0x7f, 0x00020080}, + {0xef, 0x00002000}, {0x3b, 0x000380ef}, + {0x3b, 0x000302fe}, {0x3b, 0x00028ce6}, + {0x3b, 0x000200bc}, {0x3b, 0x000188a5}, + {0x3b, 0x00010fbc}, {0x3b, 0x00008f71}, + {0x3b, 0x00000900}, {0xef, 0x00000000}, + {0xed, 0x00000001}, {0x40, 0x000380ef}, + {0x40, 0x000302fe}, {0x40, 0x00028ce6}, + {0x40, 0x000200bc}, {0x40, 0x000188a5}, + {0x40, 0x00010fbc}, {0x40, 0x00008f71}, + {0x40, 0x00000900}, {0xed, 0x00000000}, + {0x82, 0x00080000}, {0x83, 0x00008000}, + {0x84, 0x00048d80}, {0x85, 0x00068000}, + {0xa2, 0x00080000}, {0xa3, 0x00008000}, + {0xa4, 0x00048d80}, {0xa5, 0x00068000}, + {0xed, 0x00000002}, {0xef, 0x00000002}, + {0x56, 0x00000032}, {0x76, 0x00000032}, + {0x01, 0x00000780}, + {0xff, 0xffffffff} +}; + static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = { {0x00, 0x00030159}, {0x01, 0x00031284}, {0x02, 0x00098000}, {0x03, 0x00018c63}, @@ -1270,6 +1339,11 @@ static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv, return 
retval; } +/* + * The RTL8723BU driver indicates that registers 0xb2 and 0xb6 can + * have write issues in high temperature conditions. We may have to + * retry writing them. + */ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv, enum rtl8xxxu_rfpath path, u8 reg, u32 data) { @@ -4562,6 +4636,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rftable = rtl8723au_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); break; + case 0x8723b: + rftable = rtl8723bu_radioa_1t_init_table; + ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + break; case 0x8188c: if (priv->hi_pa) rftable = rtl8188ru_radioa_1t_highpa_table; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 205504848558..326fda5e810b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -537,8 +537,10 @@ struct rtl8723bu_efuse { u8 res9[2]; u8 vendor_name[0x07]; u8 res10[2]; - u8 device_name[0x14]; /* 0xe8 */ - u8 res11[0xd4]; + u8 device_name[0x14]; + u8 res11[0xcf]; + u8 package_type; /* 0x1fb */ + u8 res12[0x4]; }; struct rtl8192eu_efuse { -- cgit v1.2.3 From f0d9f5e90717a5e1ab014d63590d10c806ab15b0 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:16 -0500 Subject: rtl8xxxu: Add rtl8723bu_phy_init_antenna_selection() So far this is just for 8723BU. It includes writing to a number of registers I have seen no description for so far. 0x0064 0x0930 0x0944 Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 36 ++++++++++++++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 37 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7d62440ec893..c55193bd35b4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2670,6 +2670,38 @@ static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv) } } +static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) +{ + u32 val32; + + val32 = rtl8xxxu_read32(priv, 0x64); + val32 &= ~(BIT(20) | BIT(24)); + rtl8xxxu_write32(priv, 0x64, val32); + + val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); + val32 &= ~BIT(4); + val32 |= BIT(3); + rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); + + val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); + val32 &= ~BIT(23); + val32 |= BIT(24); + rtl8xxxu_write32(priv, REG_LEDCFG0, val32); + + val32 = rtl8xxxu_read32(priv, 0x0944); + val32 |= (BIT(0) | BIT(1)); + rtl8xxxu_write32(priv, 0x0944, val32); + + val32 = rtl8xxxu_read32(priv, 0x0930); + val32 &= 0xffffff00; + val32 |= 0x77; + rtl8xxxu_write32(priv, 0x0930, val32); + + val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); + val32 |= BIT(11); + rtl8xxxu_write32(priv, REG_PWR_DATA, val32); +} + static int rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array) { @@ -4621,6 +4653,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write32(priv, REG_HIMR1, 0x00); } + if (priv->fops->phy_init_antenna_selection) + priv->fops->phy_init_antenna_selection(priv); + ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); dev_dbg(dev, "%s: init_mac %i\n", __func__, ret); if (ret) @@ -6380,6 +6415,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723au_power_on, 
.llt_init = rtl8xxxu_auto_llt_table, + .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .writeN_block_size = 1024, }; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 326fda5e810b..43300bd8b575 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -764,5 +764,6 @@ struct rtl8xxxu_fileops { int (*load_firmware) (struct rtl8xxxu_priv *priv); int (*power_on) (struct rtl8xxxu_priv *priv); int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); + void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); int writeN_block_size; }; -- cgit v1.2.3 From b7dd8ff916574f77d40914321c08b7d78f101530 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:17 -0500 Subject: rtl8xxxu: Add rtl8723b_mac_init_table Newer chips seem to have some different mac registers, requiring a different init table. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 37 +++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c55193bd35b4..813aadb3e473 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -153,6 +153,37 @@ static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = { {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff}, }; +static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = { + {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0}, + {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10}, + {0x430, 0x00}, {0x431, 0x00}, + {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05}, + {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05}, + {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01}, + {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00}, + {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f}, + {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00}, + {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f}, + {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66}, + {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, + {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, + {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, + {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, + {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, + {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, + {0x516, 0x0a}, {0x525, 0x4f}, + {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, + {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, + {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, + {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, + {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, + {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, + {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, + {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, + {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04}, + {0xffff, 0xff}, +}; + static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = { {0x800, 0x80040000}, {0x804, 0x00000003}, {0x808, 0x0000fc00}, {0x80c, 0x0000000a}, @@ -4656,7 +4687,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (priv->fops->phy_init_antenna_selection) 
priv->fops->phy_init_antenna_selection(priv); - ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); + if (priv->rtlchip == 0x8723b) + ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table); + else + ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table); + dev_dbg(dev, "%s: init_mac %i\n", __func__, ret); if (ret) goto exit; -- cgit v1.2.3 From b9f498e11fe9ae508c716d5cb4e468cf176018f2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:18 -0500 Subject: rtl8xxxu: Add 8723by AGC table The different RF module seems to require a different AGC table as well Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 75 +++++++++++++++++++++++- 1 file changed, 74 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 813aadb3e473..89c66a2e49c1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -748,6 +748,77 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = { {0xffff, 0xffffffff} }; +static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = { + {0xc78, 0xfd000001}, {0xc78, 0xfc010001}, + {0xc78, 0xfb020001}, {0xc78, 0xfa030001}, + {0xc78, 0xf9040001}, {0xc78, 0xf8050001}, + {0xc78, 0xf7060001}, {0xc78, 0xf6070001}, + {0xc78, 0xf5080001}, {0xc78, 0xf4090001}, + {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001}, + {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001}, + {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001}, + {0xc78, 0xed100001}, {0xc78, 0xec110001}, + {0xc78, 0xeb120001}, {0xc78, 0xea130001}, + {0xc78, 0xe9140001}, {0xc78, 0xe8150001}, + {0xc78, 0xe7160001}, {0xc78, 0xe6170001}, + {0xc78, 0xe5180001}, {0xc78, 0xe4190001}, + {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001}, + {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001}, + {0xc78, 0x671e0001}, {0xc78, 0x661f0001}, + {0xc78, 0x65200001}, {0xc78, 0x64210001}, + {0xc78, 0x63220001}, {0xc78, 0x4a230001}, + {0xc78, 0x49240001}, {0xc78, 0x48250001}, + {0xc78, 0x47260001}, {0xc78, 0x46270001}, + {0xc78, 0x45280001}, {0xc78, 0x44290001}, + {0xc78, 0x432a0001}, {0xc78, 0x422b0001}, + {0xc78, 0x292c0001}, {0xc78, 0x282d0001}, + {0xc78, 0x272e0001}, {0xc78, 0x262f0001}, + {0xc78, 0x0a300001}, {0xc78, 0x09310001}, + {0xc78, 0x08320001}, {0xc78, 0x07330001}, + {0xc78, 0x06340001}, {0xc78, 0x05350001}, + {0xc78, 0x04360001}, {0xc78, 0x03370001}, + {0xc78, 0x02380001}, {0xc78, 0x01390001}, + {0xc78, 0x013a0001}, {0xc78, 0x013b0001}, + {0xc78, 0x013c0001}, {0xc78, 0x013d0001}, + {0xc78, 0x013e0001}, {0xc78, 0x013f0001}, + {0xc78, 0xfc400001}, {0xc78, 0xfb410001}, + {0xc78, 0xfa420001}, {0xc78, 0xf9430001}, + {0xc78, 0xf8440001}, {0xc78, 0xf7450001}, + {0xc78, 0xf6460001}, {0xc78, 0xf5470001}, + {0xc78, 0xf4480001}, {0xc78, 0xf3490001}, + {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001}, + {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001}, + {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001}, + {0xc78, 0xec500001}, {0xc78, 0xeb510001}, + {0xc78, 0xea520001}, {0xc78, 0xe9530001}, + {0xc78, 0xe8540001}, {0xc78, 0xe7550001}, + {0xc78, 0xe6560001}, {0xc78, 0xe5570001}, + {0xc78, 0xe4580001}, {0xc78, 0xe3590001}, + {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001}, + {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001}, + {0xc78, 0x675e0001}, {0xc78, 0x665f0001}, + {0xc78, 0x65600001}, {0xc78, 0x64610001}, + {0xc78, 0x63620001}, {0xc78, 0x62630001}, + {0xc78, 0x61640001}, {0xc78, 0x48650001}, + {0xc78, 0x47660001}, {0xc78, 0x46670001}, + {0xc78, 0x45680001}, {0xc78, 
0x44690001}, + {0xc78, 0x436a0001}, {0xc78, 0x426b0001}, + {0xc78, 0x286c0001}, {0xc78, 0x276d0001}, + {0xc78, 0x266e0001}, {0xc78, 0x256f0001}, + {0xc78, 0x24700001}, {0xc78, 0x09710001}, + {0xc78, 0x08720001}, {0xc78, 0x07730001}, + {0xc78, 0x06740001}, {0xc78, 0x05750001}, + {0xc78, 0x04760001}, {0xc78, 0x03770001}, + {0xc78, 0x02780001}, {0xc78, 0x01790001}, + {0xc78, 0x017a0001}, {0xc78, 0x017b0001}, + {0xc78, 0x017c0001}, {0xc78, 0x017d0001}, + {0xc78, 0x017e0001}, {0xc78, 0x017f0001}, + {0xc50, 0x69553422}, + {0xc50, 0x69553420}, + {0x824, 0x00390204}, + {0xffff, 0xffffffff} +}; + static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = { {0x00, 0x00030159}, {0x01, 0x00031284}, {0x02, 0x00098000}, {0x03, 0x00039c63}, @@ -2895,7 +2966,9 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_TX_TO_TX, val32); } - if (priv->hi_pa) + if (priv->rtlchip == 0x8723b) + rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table); + else if (priv->hi_pa) rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table); else rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); -- cgit v1.2.3 From ed35d09469e55b3324ef85b33e0355bd86cdd066 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:19 -0500 Subject: rtl8xxxu: Handle 32 bit mailbox extension regs found on 8723bu/8192eu/8812 Gen1 chips use a 16 bit mailbox extension register, for upto 48 bit mailbox commands. The newer generation chips use a 32 bit mailbox extension register instead, for upto 64 bit mailbox commands. Handle writing the larger mailboxes. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 29 +++++++++++++++++++----- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 8 ++++++- 2 files changed, 30 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 89c66a2e49c1..eb70fa270ca3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1482,7 +1482,8 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c) mbox_nr = priv->next_mbox; mbox_reg = REG_HMBOX_0 + (mbox_nr * 4); - mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2); + mbox_ext_reg = priv->fops->mbox_ext_reg + + (mbox_nr * priv->fops->mbox_ext_width); /* * MBOX ready? 
@@ -1504,11 +1505,19 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c) * Need to swap as it's being swapped again by rtl8xxxu_write16/32() */ if (h2c->cmd.cmd & H2C_EXT) { - rtl8xxxu_write16(priv, mbox_ext_reg, - le16_to_cpu(h2c->raw.ext)); - if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C) - dev_info(dev, "H2C_EXT %04x\n", - le16_to_cpu(h2c->raw.ext)); + if (priv->fops->mbox_ext_width == 4) { + rtl8xxxu_write32(priv, mbox_ext_reg, + le32_to_cpu(h2c->raw_wide.ext)); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C) + dev_info(dev, "H2C_EXT %08x\n", + le32_to_cpu(h2c->raw_wide.ext)); + } else { + rtl8xxxu_write16(priv, mbox_ext_reg, + le16_to_cpu(h2c->raw.ext)); + if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C) + dev_info(dev, "H2C_EXT %04x\n", + le16_to_cpu(h2c->raw.ext)); + } } rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data)); if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C) @@ -6516,6 +6525,8 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .power_on = rtl8723au_power_on, .llt_init = rtl8xxxu_init_llt_table, .writeN_block_size = 1024, + .mbox_ext_reg = REG_HMBOX_EXT_0, + .mbox_ext_width = 2, }; static struct rtl8xxxu_fileops rtl8723bu_fops = { @@ -6525,6 +6536,8 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .llt_init = rtl8xxxu_auto_llt_table, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .writeN_block_size = 1024, + .mbox_ext_reg = REG_HMBOX_EXT0_8723B, + .mbox_ext_width = 4, }; #ifdef CONFIG_RTL8XXXU_UNTESTED @@ -6535,6 +6548,8 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .power_on = rtl8192cu_power_on, .llt_init = rtl8xxxu_init_llt_table, .writeN_block_size = 128, + .mbox_ext_reg = REG_HMBOX_EXT_0, + .mbox_ext_width = 2, }; #endif @@ -6545,6 +6560,8 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .power_on = rtl8192eu_power_on, .llt_init = rtl8xxxu_auto_llt_table, .writeN_block_size = 128, + .mbox_ext_reg = REG_HMBOX_EXT0_8723B, + .mbox_ext_width = 4, }; static struct usb_device_id dev_table[] = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 43300bd8b575..7aed304712e7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -628,12 +628,16 @@ struct h2c_cmd { union { struct { u8 cmd; - u8 data[5]; + u8 data[7]; } __packed cmd; struct { __le32 data; __le16 ext; } __packed raw; + struct { + __le32 data; + __le32 ext; + } __packed raw_wide; struct { u8 cmd; u8 data; @@ -766,4 +770,6 @@ struct rtl8xxxu_fileops { int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); int writeN_block_size; + u16 mbox_ext_reg; + char mbox_ext_width; }; -- cgit v1.2.3 From 14d8856082c5fdb320b108af690c79d4d560d9b2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:20 -0500 Subject: rtl8xxxu: Add some missing register definitions for 8723bu This introduces additional register definitions for newer generation chips. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 3cbb54956e79..f1375b8904da 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -155,6 +155,8 @@ /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */ #define REG_GPIO_IO_SEL_2 0x0062 +/* RTL8723B */ +#define REG_PAD_CTRL1 0x0064 /* RTL8723 only WIFI/BT/GPS Multi-Function control source. */ #define REG_MULTI_FUNC_CTRL 0x0068 @@ -196,9 +198,11 @@ #define REG_HMBOX_EXT_1 0x008a #define REG_HMBOX_EXT_2 0x008c #define REG_HMBOX_EXT_3 0x008e -/* Interrupt registers for 8192e/8812 */ +/* Interrupt registers for 8192e/8723bu/8812 */ #define REG_HIMR0 0x00b0 +#define REG_HISR0 0x00b4 #define REG_HIMR1 0x00b8 +#define REG_HISR1 0x00bc /* Host suspend counter on FPGA platform */ #define REG_HOST_SUSP_CNT 0x00bc @@ -337,6 +341,8 @@ #define REG_MBIST_DONE 0x0178 #define REG_MBIST_FAIL 0x017c #define REG_C2HEVT_MSG_NORMAL 0x01a0 +/* 8192EU/8723BU/8812 */ +#define REG_C2HEVT_CMD_ID_8723B 0x01ae #define REG_C2HEVT_CLEAR 0x01af #define REG_C2HEVT_MSG_TEST 0x01b8 #define REG_MCUTST_1 0x01c0 -- cgit v1.2.3 From 6431ea00dfc5a7a3cdca1320034f20b69846a432 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:21 -0500 Subject: rtl8xxxu: Group USB fixups together for all chips In addition do not apply fixups for 8188/8191/8192 A-cut UMC parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 57 +++++++++++++----------- 1 file changed, 32 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index eb70fa270ca3..be95ef349d78 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4757,11 +4757,42 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) } } - if (priv->rtlchip == 0x8192e) { + /* Fix USB interface interference issue */ + if (priv->rtlchip == 0x8723a) { + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x8d); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); + } else { val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); val32 |= TXDMA_OFFSET_DROP_DATA_EN; rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); + } + /* Solve too many protocol error on USB bus */ + /* Can't do this for 8188/8192 UMC A cut parts */ + if (priv->rtlchip == 0x8723a || + ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c || + priv->rtlchip == 0x8188c) && + (priv->chip_cut || !priv->vendor_umc))) { + rtl8xxxu_write8(priv, 0xfe40, 0xe6); + rtl8xxxu_write8(priv, 0xfe41, 0x94); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x19); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe5); + rtl8xxxu_write8(priv, 0xfe41, 0x91); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + + rtl8xxxu_write8(priv, 0xfe40, 0xe2); + rtl8xxxu_write8(priv, 0xfe41, 0x81); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + } + + if (priv->rtlchip == 0x8192e || priv->rtlchip == 0x8723b) { rtl8xxxu_write32(priv, REG_HIMR0, 0x00); rtl8xxxu_write32(priv, REG_HIMR1, 
0x00); } @@ -4968,30 +4999,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8723a_phy_lc_calibrate(priv); - /* fix USB interface interference issue */ - rtl8xxxu_write8(priv, 0xfe40, 0xe0); - rtl8xxxu_write8(priv, 0xfe41, 0x8d); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); - - /* Solve too many protocol error on USB bus */ - /* Can't do this for 8188/8192 UMC A cut parts */ - rtl8xxxu_write8(priv, 0xfe40, 0xe6); - rtl8xxxu_write8(priv, 0xfe41, 0x94); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - - rtl8xxxu_write8(priv, 0xfe40, 0xe0); - rtl8xxxu_write8(priv, 0xfe41, 0x19); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - - rtl8xxxu_write8(priv, 0xfe40, 0xe5); - rtl8xxxu_write8(priv, 0xfe41, 0x91); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - - rtl8xxxu_write8(priv, 0xfe40, 0xe2); - rtl8xxxu_write8(priv, 0xfe41, 0x81); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - /* Init BT hw config. */ rtl8xxxu_init_bt(priv); -- cgit v1.2.3 From d940c247adbd44c885777099e3e6c64b7ca2d078 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:22 -0500 Subject: rtl8xxxu: Add definitions for new generation h2c commands The larger mailboxes also use a different set of mailbox commands. This provides a list of the 64 bit commands. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 87 ++++++++++++++++++++++-- 1 file changed, 83 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 7aed304712e7..198b1d874551 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -617,12 +617,91 @@ struct rtl8xxxu_rfregs { #define H2C_MAX_MBOX 4 #define H2C_EXT BIT(7) -#define H2C_SET_POWER_MODE 1 -#define H2C_JOIN_BSS_REPORT 2 #define H2C_JOIN_BSS_DISCONNECT 0 #define H2C_JOIN_BSS_CONNECT 1 -#define H2C_SET_RSSI 5 -#define H2C_SET_RATE_MASK (6 | H2C_EXT) + +/* + * H2C (firmware) commands differ between the older generation chips + * 8188[cr]u, 819[12]cu, and 8723au, and the more recent chips 8723bu, + * 8192[de]u, 8192eu, and 8812. 
+ */ +enum h2c_cmd_8723a { + H2C_SET_POWER_MODE = 1, + H2C_JOIN_BSS_REPORT = 2, + H2C_SET_RSSI = 5, + H2C_SET_RATE_MASK = (6 | H2C_EXT), +}; + +enum h2c_cmd_8723b { + /* + * Common Class: 000 + */ + H2C_8723B_RSVD_PAGE = 0x00, + H2C_8723B_MEDIA_STATUS_RPT = 0x01, + H2C_8723B_SCAN_ENABLE = 0x02, + H2C_8723B_KEEP_ALIVE = 0x03, + H2C_8723B_DISCON_DECISION = 0x04, + H2C_8723B_PSD_OFFLOAD = 0x05, + H2C_8723B_AP_OFFLOAD = 0x08, + H2C_8723B_BCN_RSVDPAGE = 0x09, + H2C_8723B_PROBERSP_RSVDPAGE = 0x0A, + H2C_8723B_FCS_RSVDPAGE = 0x10, + H2C_8723B_FCS_INFO = 0x11, + H2C_8723B_AP_WOW_GPIO_CTRL = 0x13, + + /* + * PoweSave Class: 001 + */ + H2C_8723B_SET_PWR_MODE = 0x20, + H2C_8723B_PS_TUNING_PARA = 0x21, + H2C_8723B_PS_TUNING_PARA2 = 0x22, + H2C_8723B_P2P_LPS_PARAM = 0x23, + H2C_8723B_P2P_PS_OFFLOAD = 0x24, + H2C_8723B_PS_SCAN_ENABLE = 0x25, + H2C_8723B_SAP_PS_ = 0x26, + H2C_8723B_INACTIVE_PS_ = 0x27, + H2C_8723B_FWLPS_IN_IPS_ = 0x28, + + /* + * Dynamic Mechanism Class: 010 + */ + H2C_8723B_MACID_CFG = 0x40, + H2C_8723B_TXBF = 0x41, + H2C_8723B_RSSI_SETTING = 0x42, + H2C_8723B_AP_REQ_TXRPT = 0x43, + H2C_8723B_INIT_RATE_COLLECT = 0x44, + + /* + * BT Class: 011 + */ + H2C_8723B_B_TYPE_TDMA = 0x60, + H2C_8723B_BT_INFO = 0x61, + H2C_8723B_FORCE_BT_TXPWR = 0x62, + H2C_8723B_BT_IGNORE_WLANACT = 0x63, + H2C_8723B_DAC_SWING_VALUE = 0x64, + H2C_8723B_ANT_SEL_RSV = 0x65, + H2C_8723B_WL_OPMODE = 0x66, + H2C_8723B_BT_MP_OPER = 0x67, + H2C_8723B_BT_CONTROL = 0x68, + H2C_8723B_BT_WIFI_CTRL = 0x69, + H2C_8723B_BT_FW_PATCH = 0x6A, + H2C_8723B_BT_WLAN_CALIBRATION = 0x6D, + + /* + * WOWLAN Class: 100 + */ + H2C_8723B_WOWLAN = 0x80, + H2C_8723B_REMOTE_WAKE_CTRL = 0x81, + H2C_8723B_AOAC_GLOBAL_INFO = 0x82, + H2C_8723B_AOAC_RSVD_PAGE = 0x83, + H2C_8723B_AOAC_RSVD_PAGE2 = 0x84, + H2C_8723B_D0_SCAN_OFFLOAD_CTRL = 0x85, + H2C_8723B_D0_SCAN_OFFLOAD_INFO = 0x86, + H2C_8723B_CHNL_SWITCH_OFFLOAD = 0x87, + + H2C_8723B_RESET_TSF = 0xC0, +}; + struct h2c_cmd { union { -- cgit v1.2.3 From f6c47702ede3edc1dbf73355c134d7e33d9c86ca Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 29 Feb 2016 17:04:23 -0500 Subject: rtl8xxxu: Don't check for illegal offset when reading from efuse It is enough to check for either illegal offset or illegal map address because map address is a value derived from an offset: map_addr = offset * 8 EFUSE_MAP_LEN = EFUSE_MAX_SECTION_8723A * 8 Leave just the check for an illegal map address because its upper bound (EFUSE_MAP_LEN) is used also in a couple other places. 
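Spelling out the equivalence the change relies on, with each efuse section mapping to 8 bytes of the wifi map:

	map_addr = offset * 8
	EFUSE_MAP_LEN = EFUSE_MAX_SECTION_8723A * 8

	map_addr >= EFUSE_MAP_LEN
		<=>  offset * 8 >= EFUSE_MAX_SECTION_8723A * 8
		<=>  offset >= EFUSE_MAX_SECTION_8723A

so rejecting an illegal map_addr already rejects every illegal offset, and only one of the two tests needs to remain.
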
Signed-off-by: Jakub Sitnicki Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 63 +++++++++++------------- 1 file changed, 28 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index be95ef349d78..c279d6d04eb4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2417,6 +2417,8 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) efuse_addr = 0; while (efuse_addr < EFUSE_REAL_CONTENT_LEN_8723A) { + u16 map_addr; + ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &header); if (ret || header == 0xff) goto exit; @@ -2439,45 +2441,36 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) word_mask = header & 0x0f; } - if (offset < EFUSE_MAX_SECTION_8723A) { - u16 map_addr; - /* Get word enable value from PG header */ + /* Get word enable value from PG header */ - /* We have 8 bits to indicate validity */ - map_addr = offset * 8; - if (map_addr >= EFUSE_MAP_LEN) { - dev_warn(dev, "%s: Illegal map_addr (%04x), " - "efuse corrupt!\n", - __func__, map_addr); - ret = -EINVAL; - goto exit; - } - for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { - /* Check word enable condition in the section */ - if (!(word_mask & BIT(i))) { - ret = rtl8xxxu_read_efuse8(priv, - efuse_addr++, - &val8); - if (ret) - goto exit; - priv->efuse_wifi.raw[map_addr++] = val8; - - ret = rtl8xxxu_read_efuse8(priv, - efuse_addr++, - &val8); - if (ret) - goto exit; - priv->efuse_wifi.raw[map_addr++] = val8; - } else - map_addr += 2; - } - } else { - dev_warn(dev, - "%s: Illegal offset (%04x), efuse corrupt!\n", - __func__, offset); + /* We have 8 bits to indicate validity */ + map_addr = offset * 8; + if (map_addr >= EFUSE_MAP_LEN) { + dev_warn(dev, "%s: Illegal map_addr (%04x), " + "efuse corrupt!\n", + __func__, map_addr); ret = -EINVAL; goto exit; } + for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { + /* Check word enable condition in the section */ + if (!(word_mask & BIT(i))) { + ret = rtl8xxxu_read_efuse8(priv, + efuse_addr++, + &val8); + if (ret) + goto exit; + priv->efuse_wifi.raw[map_addr++] = val8; + + ret = rtl8xxxu_read_efuse8(priv, + efuse_addr++, + &val8); + if (ret) + goto exit; + priv->efuse_wifi.raw[map_addr++] = val8; + } else + map_addr += 2; + } } exit: -- cgit v1.2.3 From 32a39dd4b574abd32304857e8a5b24de113b81ee Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 29 Feb 2016 17:04:24 -0500 Subject: rtl8xxxu: Skip disabled efuse words early Avoid a negative conditional and an extra level of indentation in the bigger part of the loop body. 
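For orientation, a worked example of how the word-enable mask behaves in this loop (the mask value is invented): a cleared bit means the corresponding 2-byte word was programmed and is copied, a set bit means it is skipped and only the map address advances.

	word_mask = 0x0c, EFUSE_MAX_WORD_UNIT = 4:
	  words 0 and 1 (bits clear)  ->  two efuse bytes read each, map_addr += 2 each
	  words 2 and 3 (bits set)    ->  nothing read, map_addr += 2 each

	In total 4 bytes are consumed from the efuse while map_addr advances by 8.
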
Signed-off-by: Jakub Sitnicki Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 28 +++++++++++------------- 1 file changed, 13 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c279d6d04eb4..7a1c094f1f88 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2454,22 +2454,20 @@ static int rtl8xxxu_read_efuse(struct rtl8xxxu_priv *priv) } for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) { /* Check word enable condition in the section */ - if (!(word_mask & BIT(i))) { - ret = rtl8xxxu_read_efuse8(priv, - efuse_addr++, - &val8); - if (ret) - goto exit; - priv->efuse_wifi.raw[map_addr++] = val8; - - ret = rtl8xxxu_read_efuse8(priv, - efuse_addr++, - &val8); - if (ret) - goto exit; - priv->efuse_wifi.raw[map_addr++] = val8; - } else + if (word_mask & BIT(i)) { map_addr += 2; + continue; + } + + ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8); + if (ret) + goto exit; + priv->efuse_wifi.raw[map_addr++] = val8; + + ret = rtl8xxxu_read_efuse8(priv, efuse_addr++, &val8); + if (ret) + goto exit; + priv->efuse_wifi.raw[map_addr++] = val8; } } -- cgit v1.2.3 From d38f1c3715d6de00d3b7a09254f9a0930d611bed Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 29 Feb 2016 17:04:25 -0500 Subject: rtl8xxxu: rtl8723au: Introduce a pointer to efuse Signed-off-by: Jakub Sitnicki Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 26 +++++++++++++----------- 1 file changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7a1c094f1f88..172d76c0dcb5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2127,43 +2127,45 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) { - if (priv->efuse_wifi.efuse8723.rtl_id != cpu_to_le16(0x8129)) + struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723; + + if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; - ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723.mac_addr); + ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, - priv->efuse_wifi.efuse8723.cck_tx_power_index_A, + efuse->cck_tx_power_index_A, sizeof(priv->cck_tx_power_index_A)); memcpy(priv->cck_tx_power_index_B, - priv->efuse_wifi.efuse8723.cck_tx_power_index_B, + efuse->cck_tx_power_index_B, sizeof(priv->cck_tx_power_index_B)); memcpy(priv->ht40_1s_tx_power_index_A, - priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_A, + efuse->ht40_1s_tx_power_index_A, sizeof(priv->ht40_1s_tx_power_index_A)); memcpy(priv->ht40_1s_tx_power_index_B, - priv->efuse_wifi.efuse8723.ht40_1s_tx_power_index_B, + efuse->ht40_1s_tx_power_index_B, sizeof(priv->ht40_1s_tx_power_index_B)); memcpy(priv->ht20_tx_power_index_diff, - priv->efuse_wifi.efuse8723.ht20_tx_power_index_diff, + efuse->ht20_tx_power_index_diff, sizeof(priv->ht20_tx_power_index_diff)); memcpy(priv->ofdm_tx_power_index_diff, - priv->efuse_wifi.efuse8723.ofdm_tx_power_index_diff, + efuse->ofdm_tx_power_index_diff, sizeof(priv->ofdm_tx_power_index_diff)); memcpy(priv->ht40_max_power_offset, - priv->efuse_wifi.efuse8723.ht40_max_power_offset, + 
efuse->ht40_max_power_offset, sizeof(priv->ht40_max_power_offset)); memcpy(priv->ht20_max_power_offset, - priv->efuse_wifi.efuse8723.ht20_max_power_offset, + efuse->ht20_max_power_offset, sizeof(priv->ht20_max_power_offset)); dev_info(&priv->udev->dev, "Vendor: %.7s\n", - priv->efuse_wifi.efuse8723.vendor_name); + efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.41s\n", - priv->efuse_wifi.efuse8723.device_name); + efuse->device_name); return 0; } -- cgit v1.2.3 From 4959444165523e68118035bebb727b9431746765 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 29 Feb 2016 17:04:26 -0500 Subject: rtl8xxxu: rtl8192cu: Introduce a pointer to efuse Signed-off-by: Jakub Sitnicki Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 29 ++++++++++++------------ 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 172d76c0dcb5..fcae9f020f5c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2218,50 +2218,51 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) { + struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192; int i; - if (priv->efuse_wifi.efuse8192.rtl_id != cpu_to_le16(0x8129)) + if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; - ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192.mac_addr); + ether_addr_copy(priv->mac_addr, efuse->mac_addr); memcpy(priv->cck_tx_power_index_A, - priv->efuse_wifi.efuse8192.cck_tx_power_index_A, + efuse->cck_tx_power_index_A, sizeof(priv->cck_tx_power_index_A)); memcpy(priv->cck_tx_power_index_B, - priv->efuse_wifi.efuse8192.cck_tx_power_index_B, + efuse->cck_tx_power_index_B, sizeof(priv->cck_tx_power_index_B)); memcpy(priv->ht40_1s_tx_power_index_A, - priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_A, + efuse->ht40_1s_tx_power_index_A, sizeof(priv->ht40_1s_tx_power_index_A)); memcpy(priv->ht40_1s_tx_power_index_B, - priv->efuse_wifi.efuse8192.ht40_1s_tx_power_index_B, + efuse->ht40_1s_tx_power_index_B, sizeof(priv->ht40_1s_tx_power_index_B)); memcpy(priv->ht40_2s_tx_power_index_diff, - priv->efuse_wifi.efuse8192.ht40_2s_tx_power_index_diff, + efuse->ht40_2s_tx_power_index_diff, sizeof(priv->ht40_2s_tx_power_index_diff)); memcpy(priv->ht20_tx_power_index_diff, - priv->efuse_wifi.efuse8192.ht20_tx_power_index_diff, + efuse->ht20_tx_power_index_diff, sizeof(priv->ht20_tx_power_index_diff)); memcpy(priv->ofdm_tx_power_index_diff, - priv->efuse_wifi.efuse8192.ofdm_tx_power_index_diff, + efuse->ofdm_tx_power_index_diff, sizeof(priv->ofdm_tx_power_index_diff)); memcpy(priv->ht40_max_power_offset, - priv->efuse_wifi.efuse8192.ht40_max_power_offset, + efuse->ht40_max_power_offset, sizeof(priv->ht40_max_power_offset)); memcpy(priv->ht20_max_power_offset, - priv->efuse_wifi.efuse8192.ht20_max_power_offset, + efuse->ht20_max_power_offset, sizeof(priv->ht20_max_power_offset)); dev_info(&priv->udev->dev, "Vendor: %.7s\n", - priv->efuse_wifi.efuse8192.vendor_name); + efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.20s\n", - priv->efuse_wifi.efuse8192.device_name); + efuse->device_name); - if (priv->efuse_wifi.efuse8192.rf_regulatory & 0x20) { + if (efuse->rf_regulatory & 0x20) { sprintf(priv->chip_name, "8188RU"); priv->hi_pa = 1; } -- cgit v1.2.3 From b7dda34d1ecda7e3243137a6ffcb7e9173e5d651 
Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:27 -0500 Subject: rtl8xxxu: rtl8192eu_parse_efuse(): Use a pointer to the struct rtl8192eu_efuse Make the code easier to read and less error prone by using a pointer to the efuse. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 26 +++++++++--------------- 1 file changed, 10 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index fcae9f020f5c..a8f1eae60815 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2288,33 +2288,27 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) { + struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu; int i; - if (priv->efuse_wifi.efuse8192eu.rtl_id != cpu_to_le16(0x8129)) + if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; - ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8192eu.mac_addr); + ether_addr_copy(priv->mac_addr, efuse->mac_addr); - memcpy(priv->cck_tx_power_index_A, - priv->efuse_wifi.efuse8192eu.cck_tx_power_index_A, + memcpy(priv->cck_tx_power_index_A, efuse->cck_tx_power_index_A, sizeof(priv->cck_tx_power_index_A)); - memcpy(priv->cck_tx_power_index_B, - priv->efuse_wifi.efuse8192eu.cck_tx_power_index_B, + memcpy(priv->cck_tx_power_index_B, efuse->cck_tx_power_index_B, sizeof(priv->cck_tx_power_index_B)); - memcpy(priv->ht40_1s_tx_power_index_A, - priv->efuse_wifi.efuse8192eu.ht40_1s_tx_power_index_A, + memcpy(priv->ht40_1s_tx_power_index_A, efuse->ht40_1s_tx_power_index_A, sizeof(priv->ht40_1s_tx_power_index_A)); - memcpy(priv->ht40_1s_tx_power_index_B, - priv->efuse_wifi.efuse8192eu.ht40_1s_tx_power_index_B, + memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, sizeof(priv->ht40_1s_tx_power_index_B)); - dev_info(&priv->udev->dev, "Vendor: %.7s\n", - priv->efuse_wifi.efuse8192eu.vendor_name); - dev_info(&priv->udev->dev, "Product: %.11s\n", - priv->efuse_wifi.efuse8192eu.device_name); - dev_info(&priv->udev->dev, "Serial: %.11s\n", - priv->efuse_wifi.efuse8192eu.serial); + dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); + dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name); + dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial); if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) { unsigned char *raw = priv->efuse_wifi.raw; -- cgit v1.2.3 From b8ba8602b81a019c67bc1f34b6b68e8965086532 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:28 -0500 Subject: rtl8xxxu: rtl8723bu_parse_efuse(): Use a pointer to the struct rtl8723bu_efuse Likewise for 8723bu, use a pointer to the efuse. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 24 ++++++++++-------------- 1 file changed, 10 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a8f1eae60815..43bbbf00e1ac 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2171,29 +2171,25 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) { - if (priv->efuse_wifi.efuse8723bu.rtl_id != cpu_to_le16(0x8129)) + struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu; + + if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; - ether_addr_copy(priv->mac_addr, priv->efuse_wifi.efuse8723bu.mac_addr); + ether_addr_copy(priv->mac_addr, efuse->mac_addr); - memcpy(priv->cck_tx_power_index_A, - priv->efuse_wifi.efuse8723bu.cck_tx_power_index_A, + memcpy(priv->cck_tx_power_index_A, efuse->cck_tx_power_index_A, sizeof(priv->cck_tx_power_index_A)); - memcpy(priv->cck_tx_power_index_B, - priv->efuse_wifi.efuse8723bu.cck_tx_power_index_B, + memcpy(priv->cck_tx_power_index_B, efuse->cck_tx_power_index_B, sizeof(priv->cck_tx_power_index_B)); - memcpy(priv->ht40_1s_tx_power_index_A, - priv->efuse_wifi.efuse8723bu.ht40_1s_tx_power_index_A, + memcpy(priv->ht40_1s_tx_power_index_A, efuse->ht40_1s_tx_power_index_A, sizeof(priv->ht40_1s_tx_power_index_A)); - memcpy(priv->ht40_1s_tx_power_index_B, - priv->efuse_wifi.efuse8723bu.ht40_1s_tx_power_index_B, + memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, sizeof(priv->ht40_1s_tx_power_index_B)); - dev_info(&priv->udev->dev, "Vendor: %.7s\n", - priv->efuse_wifi.efuse8723bu.vendor_name); - dev_info(&priv->udev->dev, "Product: %.41s\n", - priv->efuse_wifi.efuse8723bu.device_name); + dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); + dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name); if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) { int i; -- cgit v1.2.3 From 8da91571bb401a3b9ffa36d7c6ca9c2476777e4d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:29 -0500 Subject: rtl8xxxu: rtl8xxxu_h2c_cmd(): Add size argument The firmware command API differs slightly between new and old devices. The new generation requires the size since there is no extension bit encoded into the command number. 
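A minimal sketch of a call site after this change, reusing the join-BSS command that already appears elsewhere in the driver (priv is assumed to be in scope): the caller passes the size of the specific command struct, and rtl8723a_h2c_cmd() only writes the mailbox extension register when that size exceeds the 4 bytes of the main mailbox register.

	struct h2c_cmd h2c;

	memset(&h2c, 0, sizeof(struct h2c_cmd));
	h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT;
	h2c.joinbss.data = H2C_JOIN_BSS_CONNECT;
	/* sizeof(h2c.joinbss) is 2 bytes, so no extension write happens */
	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
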
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 13 +++++++------ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 - 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 43bbbf00e1ac..b42d71b39c50 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1471,7 +1471,8 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv, return retval; } -static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c) +static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, + struct h2c_cmd *h2c, int len) { struct device *dev = &priv->udev->dev; int mbox_nr, retry, retval = 0; @@ -1504,7 +1505,7 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c) /* * Need to swap as it's being swapped again by rtl8xxxu_write16/32() */ - if (h2c->cmd.cmd & H2C_EXT) { + if (len > sizeof(u32)) { if (priv->fops->mbox_ext_width == 4) { rtl8xxxu_write32(priv, mbox_ext_reg, le32_to_cpu(h2c->raw_wide.ext)); @@ -5110,9 +5111,9 @@ static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, if (sgi) h2c.ramask.arg |= 0x20; - dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x\n", __func__, - ramask, h2c.ramask.arg); - rtl8723a_h2c_cmd(priv, &h2c); + dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %li\n", + __func__, ramask, h2c.ramask.arg, sizeof(h2c.ramask)); + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask)); } static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) @@ -5200,7 +5201,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT; } h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT; - rtl8723a_h2c_cmd(priv, &h2c); + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss)); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 198b1d874551..47898ff49ffc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -720,7 +720,6 @@ struct h2c_cmd { struct { u8 cmd; u8 data; - u8 pad[4]; } __packed joinbss; struct { u8 cmd; -- cgit v1.2.3 From c7a5a190df232b833b7867ef1679dff561b73c84 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:30 -0500 Subject: rtl8xxxu: Do BT_WLAN_CALIBRATION before doing IQK calibration Newer generation chips require the firmware be notified before we start the IQK calibration. 
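The resulting flow, sketched for readability (the middle comment stands in for the existing IQK measurement code, nothing new is being called): the calibration is bracketed by firmware notifications, and rtl8xxxu_prepare_calibrate() returns early on chips whose mailbox extension is only 2 bytes wide, so older parts are unaffected.

	rtl8xxxu_prepare_calibrate(priv, 1);	/* tell the firmware IQK is starting */
	/* ... existing IQK measurement and candidate selection run here ... */
	rtl8xxxu_prepare_calibrate(priv, 0);	/* tell the firmware IQK is finished */
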
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 20 +++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++++ 2 files changed, 23 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index b42d71b39c50..e0653b217883 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1497,7 +1497,7 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv, } while (retry--); if (!retry) { - dev_dbg(dev, "%s: Mailbox busy\n", __func__); + dev_info(dev, "%s: Mailbox busy\n", __func__); retval = -EBUSY; goto error; } @@ -3877,6 +3877,20 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, } } +static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start) +{ + struct h2c_cmd h2c; + + if (priv->fops->mbox_ext_width < 4) + return; + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.bt_wlan_calibration.cmd = H2C_8723B_BT_WLAN_CALIBRATION; + h2c.bt_wlan_calibration.data = start; + + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration)); +} + static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; @@ -3888,6 +3902,8 @@ static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv) s32 reg_tmp = 0; bool simu; + rtl8xxxu_prepare_calibrate(priv, 1); + memset(result, 0, sizeof(result)); candidate = -1; @@ -3975,6 +3991,8 @@ static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, priv->bb_recovery_backup, RTL8XXXU_BB_REGS); + + rtl8xxxu_prepare_calibrate(priv, 0); } static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 47898ff49ffc..07533acaa9c6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -727,6 +727,10 @@ struct h2c_cmd { u8 arg; __le16 mask_lo; } __packed ramask; + struct { + u8 cmd; + u8 data; + } __packed bt_wlan_calibration; }; }; -- cgit v1.2.3 From eaa4d14c97c1990327fd4e9fc49beca0ce7baa14 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:31 -0500 Subject: rtl8xxxu: Do not overwrite rtl8xxxu_debug for untested chips Fix a silly bug where the debug level was overwritten rather than amended for untested chips. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e0653b217883..cfea68b7eb4d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6391,7 +6391,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, } if (untested) { - rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE; + rtl8xxxu_debug |= RTL8XXXU_DEBUG_EFUSE; dev_info(&udev->dev, "This Realtek USB WiFi dongle (0x%04x:0x%04x) is untested!\n", id->idVendor, id->idProduct); -- cgit v1.2.3 From 7ff8c1ae610159fcb6e7188224520cf0f8b7a336 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:32 -0500 Subject: rtl8xxxu: Use correct formatting type to print sizeof() Usual gcc i386 issue reported by kbuildbot Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index cfea68b7eb4d..171e0388d5ec 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5129,7 +5129,7 @@ static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, if (sgi) h2c.ramask.arg |= 0x20; - dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %li\n", + dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n", __func__, ramask, h2c.ramask.arg, sizeof(h2c.ramask)); rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask)); } -- cgit v1.2.3 From 8634af5e6a8dea0f7110f32c00ceffee71be8a80 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:33 -0500 Subject: rtl8xxxu: Make rtl8xxxu_add_path_on() use device specific init values rtl8192cu/rtl8188cu/rtl8723au use the same values, but 8723bu and 8192eu have their own. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 24 +++++++++++++++++++++--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++++ 2 files changed, 25 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 171e0388d5ec..a16dabfa713e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3557,11 +3557,13 @@ static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs, u32 path_on; int i; - path_on = path_a_on ? 0x04db25a4 : 0x0b1b25a4; if (priv->tx_paths == 1) { - path_on = 0x0bdb25a0; - rtl8xxxu_write32(priv, regs[0], 0x0b1b25a0); + path_on = priv->fops->adda_1t_path_on; + rtl8xxxu_write32(priv, regs[0], priv->fops->adda_1t_init); } else { + path_on = path_a_on ? 
priv->fops->adda_2t_path_on_a : + priv->fops->adda_2t_path_on_b; + rtl8xxxu_write32(priv, regs[0], path_on); } @@ -6537,6 +6539,10 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, + .adda_1t_init = 0x0b1b25a0, + .adda_1t_path_on = 0x0bdb25a0, + .adda_2t_path_on_a = 0x04db25a4, + .adda_2t_path_on_b = 0x0b1b25a4, }; static struct rtl8xxxu_fileops rtl8723bu_fops = { @@ -6548,6 +6554,10 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, + .adda_1t_init = 0x01c00014, + .adda_1t_path_on = 0x01c00014, + .adda_2t_path_on_a = 0x01c00014, + .adda_2t_path_on_b = 0x01c00014, }; #ifdef CONFIG_RTL8XXXU_UNTESTED @@ -6560,6 +6570,10 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, + .adda_1t_init = 0x0b1b25a0, + .adda_1t_path_on = 0x0bdb25a0, + .adda_2t_path_on_a = 0x04db25a4, + .adda_2t_path_on_b = 0x0b1b25a4, }; #endif @@ -6572,6 +6586,10 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, + .adda_1t_init = 0x0fc01616, + .adda_1t_path_on = 0x0fc01616, + .adda_2t_path_on_a = 0x0fc01616, + .adda_2t_path_on_b = 0x0fc01616, }; static struct usb_device_id dev_table[] = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 07533acaa9c6..4b8431db38d2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -854,4 +854,8 @@ struct rtl8xxxu_fileops { int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; + u32 adda_1t_init; + u32 adda_1t_path_on; + u32 adda_2t_path_on_a; + u32 adda_2t_path_on_b; }; -- cgit v1.2.3 From c6594ffd46a4b26d2a2d59c54da399ec563b6b2f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:34 -0500 Subject: rtl8xxxu: Add a couple of new register definitions This adds some additional register definitions for 8723bu, as well as a bit define for USB RXDMA aggregation in REG_RXDMA_AGG_PG_TH. 
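The patch only introduces the definitions at this point. As a sketch of how the new aggregation bit would typically be used later (assumed usage, not taken from this series), it is an ordinary read-modify-write of the threshold register:

	u32 val32;

	val32 = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
	val32 |= RXDMA_USB_AGG_ENABLE;
	rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, val32);
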
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index f1375b8904da..162a9e8e4cef 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -388,9 +388,14 @@ /* 0x0280 ~ 0x02FF RXDMA Configuration */ #define REG_RXDMA_AGG_PG_TH 0x0280 +#define RXDMA_USB_AGG_ENABLE BIT(31) #define REG_RXPKT_NUM 0x0284 #define REG_RXDMA_STATUS 0x0288 +/* Presumably only found on newer chips such as 8723bu */ +#define REG_RX_DMA_CTRL_8723B 0x0286 +#define REG_RX_DMA_MODE_CTRL_8723B 0x0290 + #define REG_RF_BB_CMD_ADDR 0x02c0 #define REG_RF_BB_CMD_DATA 0x02c4 @@ -473,6 +478,7 @@ #define REG_POWER_STATUS 0x04a4 #define REG_POWER_STAGE1 0x04b4 #define REG_POWER_STAGE2 0x04b8 +#define REG_AMPDU_BURST_MODE_8723B 0x04bc #define REG_PKT_VO_VI_LIFE_TIME 0x04c0 #define REG_PKT_BE_BK_LIFE_TIME 0x04c2 #define REG_STBC_SETTING 0x04c4 -- cgit v1.2.3 From e1547c535ede26c84b72d51e031dd57551037bec Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:35 -0500 Subject: rtl8xxxu: First stab at adding IQK calibration for 8723bu parts The 8723bu also has it's own IQK calibration process. This is similar in flow, but still different enough to warrent it's own implementation, at least for now. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 811 ++++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 17 + 3 files changed, 827 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a16dabfa713e..a40ef21016e2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3510,6 +3510,91 @@ static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv, return false; } +static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv, + int result[][8], int c1, int c2) +{ + u32 i, j, diff, simubitmap, bound = 0; + int candidate[2] = {-1, -1}; /* for path A and path B */ + int tmp1, tmp2; + bool retval = true; + + if (priv->tx_paths > 1) + bound = 8; + else + bound = 4; + + simubitmap = 0; + + for (i = 0; i < bound; i++) { + if (i & 1) { + if ((result[c1][i] & 0x00000200)) + tmp1 = result[c1][i] | 0xfffffc00; + else + tmp1 = result[c1][i]; + + if ((result[c2][i]& 0x00000200)) + tmp2 = result[c2][i] | 0xfffffc00; + else + tmp2 = result[c2][i]; + } else { + tmp1 = result[c1][i]; + tmp2 = result[c2][i]; + } + + diff = (tmp1 > tmp2) ? 
(tmp1 - tmp2) : (tmp2 - tmp1); + + if (diff > MAX_TOLERANCE) { + if ((i == 2 || i == 6) && !simubitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + candidate[(i / 4)] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + candidate[(i / 4)] = c1; + else + simubitmap = simubitmap | (1 << i); + } else { + simubitmap = simubitmap | (1 << i); + } + } + } + + if (simubitmap == 0) { + for (i = 0; i < (bound / 4); i++) { + if (candidate[i] >= 0) { + for (j = i * 4; j < (i + 1) * 4 - 2; j++) + result[3][j] = result[candidate[i]][j]; + retval = false; + } + } + return retval; + } else { + if (!(simubitmap & 0x03)) { + /* path A TX OK */ + for (i = 0; i < 2; i++) + result[3][i] = result[c1][i]; + } + + if (!(simubitmap & 0x0c)) { + /* path A RX OK */ + for (i = 2; i < 4; i++) + result[3][i] = result[c1][i]; + } + + if (!(simubitmap & 0x30) && priv->tx_paths > 1) { + /* path B RX OK */ + for (i = 4; i < 6; i++) + result[3][i] = result[c1][i]; + } + + if (!(simubitmap & 0x30) && priv->tx_paths > 1) { + /* path B RX OK */ + for (i = 6; i < 8; i++) + result[3][i] = result[c1][i]; + } + } + + return false; +} + static void rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup) { @@ -3677,6 +3762,369 @@ out: return result; } +static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_e94, reg_e9c, path_sel, val32; + int result = 0; + + path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); + + /* + * Leave IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* + * Enable path A PA in TX IQK mode + */ + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); + val32 |= 0x80000; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87); + + /* + * Tx IQK setting + */ + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* path-A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000); + rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911); + + /* + * Enter IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + val32 |= 0x80800000; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* + * The vendor driver indicates the USB module is always using + * S0S1 path 1 for the 8723bu. This may be different for 8192eu + */ + if (priv->rf_paths > 1) + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); + else + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); + + /* + * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu. + * No trace of this in the 8192eu or 8188eu vendor drivers. 
+ */ + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(1); + + /* Restore Ant Path */ + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); +#ifdef RTL8723BU_BT + /* GNT_BT = 1 */ + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); +#endif + + /* + * Leave IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); + reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); + + val32 = (reg_e9c >> 16) & 0x3ff; + if (val32 & 0x200) + val32 = 0x400 - val32; + + if (!(reg_eac & BIT(28)) && + ((reg_e94 & 0x03ff0000) != 0x01420000) && + ((reg_e9c & 0x03ff0000) != 0x00420000) && + ((reg_e94 & 0x03ff0000) < 0x01100000) && + ((reg_e94 & 0x03ff0000) > 0x00f00000) && + val32 < 0xf) + result |= 0x01; + else /* If TX not OK, ignore RX */ + goto out; + +out: + return result; +} + +static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv) +{ + u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32; + int result = 0; + + path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); + + /* + * Leave IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* + * Enable path A PA in TX IQK mode + */ + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); + val32 |= 0x80000; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7); + + /* + * Tx IQK setting + */ + rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00); + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* path-A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000); + rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911); + + /* + * Enter IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + val32 |= 0x80800000; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* + * The vendor driver indicates the USB module is always using + * S0S1 path 1 for the 8723bu. This may be different for 8192eu + */ + if (priv->rf_paths > 1) + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); + else + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); + + /* + * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu. + * No trace of this in the 8192eu or 8188eu vendor drivers. 
+ */ + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(1); + + /* Restore Ant Path */ + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); +#ifdef RTL8723BU_BT + /* GNT_BT = 1 */ + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); +#endif + + /* + * Leave IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A); + reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A); + + val32 = (reg_e9c >> 16) & 0x3ff; + if (val32 & 0x200) + val32 = 0x400 - val32; + + if (!(reg_eac & BIT(28)) && + ((reg_e94 & 0x03ff0000) != 0x01420000) && + ((reg_e9c & 0x03ff0000) != 0x00420000) && + ((reg_e94 & 0x03ff0000) < 0x01100000) && + ((reg_e94 & 0x03ff0000) > 0x00f00000) && + val32 < 0xf) + result |= 0x01; + else /* If TX not OK, ignore RX */ + goto out; + + val32 = 0x80007c00 | (reg_e94 &0x3ff0000) | + ((reg_e9c & 0x3ff0000) >> 16); + rtl8xxxu_write32(priv, REG_TX_IQK, val32); + + /* + * Modify RX IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); + val32 |= 0x80000; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77); + + /* + * PA, PAD setting + */ + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0xf80); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f); + + /* + * RX IQK setting + */ + rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800); + + /* path-A IQK setting */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c); + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c); + + rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f); + rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000); + rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000); + + /* LO calibration setting */ + rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1); + + /* + * Enter IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + val32 |= 0x80800000; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + if (priv->rf_paths > 1) + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000); + else + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280); + + /* + * Disable BT + */ + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800); + + /* One shot, path A LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000); + rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000); + + mdelay(1); + + /* Restore Ant Path */ + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel); +#ifdef RTL8723BU_BT + /* GNT_BT = 1 */ + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800); +#endif + + /* + * Leave IQK mode + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* Check failed */ + reg_eac = 
rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2); + + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x780); + + val32 = (reg_eac >> 16) & 0x3ff; + if (val32 & 0x200) + val32 = 0x400 - val32; + + if (!(reg_eac & BIT(27)) && + ((reg_ea4 & 0x03ff0000) != 0x01320000) && + ((reg_eac & 0x03ff0000) != 0x00360000) && + ((reg_ea4 & 0x03ff0000) < 0x01100000) && + ((reg_ea4 & 0x03ff0000) > 0x00f00000) && + val32 < 0xf) + result |= 0x02; + else /* If TX not OK, ignore RX */ + goto out; +out: + return result; +} + +#ifdef RTL8723BU_PATH_B +static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv) +{ + u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel; + int result = 0; + + path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* One shot, path B LOK & IQK */ + rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002); + rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000); + + mdelay(1); + + /* Check failed */ + reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2); + reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2); + reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2); + + if (!(reg_eac & BIT(31)) && + ((reg_eb4 & 0x03ff0000) != 0x01420000) && + ((reg_ebc & 0x03ff0000) != 0x00420000)) + result |= 0x01; + else + goto out; + + if (!(reg_eac & BIT(30)) && + (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) && + (((reg_ecc & 0x03ff0000) >> 16) != 0x36)) + result |= 0x02; + else + dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n", + __func__); +out: + return result; +} +#endif + static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, int result[][8], int t) { @@ -3879,6 +4327,234 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, } } +static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv, + int result[][8], int t) +{ + struct device *dev = &priv->udev->dev; + u32 i, val32; + int path_a_ok /*, path_b_ok */; + int retry = 2; + const u32 adda_regs[RTL8XXXU_ADDA_REGS] = { + REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH, + REG_RX_WAIT_CCA, REG_TX_CCK_RFON, + REG_TX_CCK_BBON, REG_TX_OFDM_RFON, + REG_TX_OFDM_BBON, REG_TX_TO_RX, + REG_TX_TO_TX, REG_RX_CCK, + REG_RX_OFDM, REG_RX_WAIT_RIFS, + REG_RX_TO_RX, REG_STANDBY, + REG_SLEEP, REG_PMPD_ANAEN + }; + const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = { + REG_TXPAUSE, REG_BEACON_CTRL, + REG_BEACON_CTRL_1, REG_GPIO_MUXCFG + }; + const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = { + REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR, + REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B, + REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE, + REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE + }; + u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff; + u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff; + + /* + * Note: IQ calibration must be performed after loading + * PHY_REG.txt , and radio_a, radio_b.txt + */ + + if (t == 0) { + /* Save ADDA parameters, turn Path A ADDA on */ + rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup, + RTL8XXXU_ADDA_REGS); + rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup); + rtl8xxxu_save_regs(priv, iqk_bb_regs, + priv->bb_backup, RTL8XXXU_BB_REGS); + } + + rtl8xxxu_path_adda_on(priv, adda_regs, true); + + /* MAC settings */ + 
rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup); + + val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING); + val32 |= 0x0f000000; + rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32); + + rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600); + rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4); + rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000); + +#ifdef RTL8723BU_PATH_B + /* Set RF mode to standby Path B */ + if (priv->tx_paths > 1) + rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000); +#endif + +#if 0 + /* Page B init */ + rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000); + + if (priv->tx_paths > 1) + rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000); +#endif + + /* + * RX IQ calibration setting for 8723B D cut large current issue + * when leaving IPS + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); + val32 |= 0x80000; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); + + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7); + + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED); + val32 |= 0x20; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); + + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd); + + for (i = 0; i < retry; i++) { + path_a_ok = rtl8723bu_iqk_path_a(priv); + if (path_a_ok == 0x01) { + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + +#if 0 /* Only needed in restore case, we may need this when going to suspend */ + priv->RFCalibrateInfo.TxLOK[RF_A] = + rtl8xxxu_read_rfreg(priv, RF_A, + RF6052_REG_TXM_IDAC); +#endif + + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_BEFORE_IQK_A); + result[t][0] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_TX_POWER_AFTER_IQK_A); + result[t][1] = (val32 >> 16) & 0x3ff; + + break; + } + } + + if (!path_a_ok) + dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__); + + for (i = 0; i < retry; i++) { + path_a_ok = rtl8723bu_rx_iqk_path_a(priv); + if (path_a_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_BEFORE_IQK_A_2); + result[t][2] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_AFTER_IQK_A_2); + result[t][3] = (val32 >> 16) & 0x3ff; + + break; + } + } + + if (!path_a_ok) + dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__); + + if (priv->tx_paths > 1) { +#if 1 + dev_warn(dev, "%s: Path B not supported\n", __func__); +#else + + /* + * Path A into standby + */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + val32 |= 0x80800000; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + /* Turn Path B ADDA on */ + rtl8xxxu_path_adda_on(priv, adda_regs, false); + + for (i = 0; i < retry; i++) { + path_b_ok = rtl8xxxu_iqk_path_b(priv); + if (path_b_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B); + result[t][4] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B); + result[t][5] = (val32 >> 16) & 0x3ff; + break; + } + } + + if (!path_b_ok) + dev_dbg(dev, "%s: Path B IQK failed!\n", 
__func__); + + for (i = 0; i < retry; i++) { + path_b_ok = rtl8723bu_rx_iqk_path_b(priv); + if (path_a_ok == 0x03) { + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_BEFORE_IQK_B_2); + result[t][6] = (val32 >> 16) & 0x3ff; + val32 = rtl8xxxu_read32(priv, + REG_RX_POWER_AFTER_IQK_B_2); + result[t][7] = (val32 >> 16) & 0x3ff; + break; + } + } + + if (!path_b_ok) + dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__); +#endif + } + + /* Back to BB mode, load original value */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 &= 0x000000ff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + + if (t) { + /* Reload ADDA power saving parameters */ + rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup, + RTL8XXXU_ADDA_REGS); + + /* Reload MAC parameters */ + rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup); + + /* Reload BB parameters */ + rtl8xxxu_restore_regs(priv, iqk_bb_regs, + priv->bb_backup, RTL8XXXU_BB_REGS); + + /* Restore RX initial gain */ + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); + val32 &= 0xffffff00; + rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50); + rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc); + + if (priv->tx_paths > 1) { + val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1); + val32 &= 0xffffff00; + rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, + val32 | 0x50); + rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1, + val32 | xb_agc); + } + + /* Load 0xe30 IQC default value */ + rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00); + rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00); + } +} + static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start) { struct h2c_cmd h2c; @@ -3893,7 +4569,7 @@ static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start) rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration)); } -static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv) +static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; int result[4][8]; /* last is final result */ @@ -3997,6 +4673,133 @@ static void rtl8723a_phy_iq_calibrate(struct rtl8xxxu_priv *priv) rtl8xxxu_prepare_calibrate(priv, 0); } +static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv) +{ + struct device *dev = &priv->udev->dev; + int result[4][8]; /* last is final result */ + int i, candidate; + bool path_a_ok, path_b_ok; + u32 reg_e94, reg_e9c, reg_ea4, reg_eac; + u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc; + u32 val32, bt_control; + s32 reg_tmp = 0; + bool simu; + + rtl8xxxu_prepare_calibrate(priv, 1); + + memset(result, 0, sizeof(result)); + candidate = -1; + + path_a_ok = false; + path_b_ok = false; + + bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU); + + for (i = 0; i < 3; i++) { + rtl8723bu_phy_iqcalibrate(priv, result, i); + + if (i == 1) { + simu = rtl8723bu_simularity_compare(priv, result, 0, 1); + if (simu) { + candidate = 0; + break; + } + } + + if (i == 2) { + simu = rtl8723bu_simularity_compare(priv, result, 0, 2); + if (simu) { + candidate = 0; + break; + } + + simu = rtl8723bu_simularity_compare(priv, result, 1, 2); + if (simu) { + candidate = 1; + } else { + for (i = 0; i < 8; i++) + reg_tmp += result[3][i]; + + if (reg_tmp) + candidate = 3; + else + candidate = -1; + } + } + } + + for (i = 0; i < 4; i++) { + reg_e94 = result[i][0]; + reg_e9c = result[i][1]; + reg_ea4 = result[i][2]; + reg_eac = result[i][3]; + reg_eb4 = result[i][4]; + reg_ebc = result[i][5]; + reg_ec4 = result[i][6]; + reg_ecc = result[i][7]; 
+ } + + if (candidate >= 0) { + reg_e94 = result[candidate][0]; + priv->rege94 = reg_e94; + reg_e9c = result[candidate][1]; + priv->rege9c = reg_e9c; + reg_ea4 = result[candidate][2]; + reg_eac = result[candidate][3]; + reg_eb4 = result[candidate][4]; + priv->regeb4 = reg_eb4; + reg_ebc = result[candidate][5]; + priv->regebc = reg_ebc; + reg_ec4 = result[candidate][6]; + reg_ecc = result[candidate][7]; + dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate); + dev_dbg(dev, + "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x " + "ecc=%x\n ", __func__, reg_e94, reg_e9c, + reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc); + path_a_ok = true; + path_b_ok = true; + } else { + reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100; + reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0; + } + + if (reg_e94 && candidate >= 0) + rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result, + candidate, (reg_ea4 == 0)); + + if (priv->tx_paths > 1 && reg_eb4) + rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result, + candidate, (reg_ec4 == 0)); + + rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg, + priv->bb_recovery_backup, RTL8XXXU_BB_REGS); + + rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control); + + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT); + val32 |= 0x80000; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177); + val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED); + val32 |= 0x20; + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32); + rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd); + + if (priv->rf_paths > 1) { + dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__); +#ifdef RTL8723BU_PATH_B + if (RF_Path == 0x0) //S1 + ODM_SetIQCbyRFpath(pDM_Odm, 0); + else //S0 + ODM_SetIQCbyRFpath(pDM_Odm, 1); +#endif + } + rtl8xxxu_prepare_calibrate(priv, 0); +} + static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv) { u32 val32; @@ -4995,7 +5798,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0); - rtl8723a_phy_iq_calibrate(priv); + priv->fops->phy_iq_calibrate(priv); /* * This should enable thermal meter @@ -6536,6 +7339,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .load_firmware = rtl8723au_load_firmware, .power_on = rtl8723au_power_on, .llt_init = rtl8xxxu_init_llt_table, + .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -6551,6 +7355,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .power_on = rtl8723au_power_on, .llt_init = rtl8xxxu_auto_llt_table, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, + .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -6567,6 +7372,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .load_firmware = rtl8192cu_load_firmware, .power_on = rtl8192cu_power_on, .llt_init = rtl8xxxu_init_llt_table, + .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -6583,6 +7389,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .load_firmware = rtl8192eu_load_firmware, .power_on = rtl8192eu_power_on, .llt_init = rtl8xxxu_auto_llt_table, + .phy_iq_calibrate = 
rtl8723bu_phy_iq_calibrate, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 4b8431db38d2..76398de2b110 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -851,6 +851,7 @@ struct rtl8xxxu_fileops { int (*power_on) (struct rtl8xxxu_priv *priv); int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); + void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 162a9e8e4cef..499210f81845 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -708,6 +708,12 @@ #define REG_MACID1 0x0700 #define REG_BSSID1 0x0708 +/* + * This seems to be 8723bu specific + */ +#define REG_BT_CONTROL_8723BU 0x0764 +#define BT_CONTROL_BT_GRANT BIT(12) + #define REG_FPGA0_RF_MODE 0x0800 #define FPGA_RF_MODE BIT(0) #define FPGA_RF_MODE_JAPAN BIT(1) @@ -810,6 +816,7 @@ #define REG_RFE_CTRL_ANTA_SRC 0x0930 /* 8723BU */ #define REG_RFE_PATH_SELECT 0x0940 /* 8723BU */ #define REG_RFE_BUFFER 0x0944 /* 8723BU */ +#define REG_S0S1_PATH_SWITCH 0x0948 /* 8723BU */ #define REG_CCK0_SYSTEM 0x0a00 #define CCK0_SIDEBAND BIT(4) @@ -1050,3 +1057,13 @@ #define RF6052_REG_TXPA_G1 0x31 /* RF TX PA control */ #define RF6052_REG_TXPA_G2 0x32 /* RF TX PA control */ #define RF6052_REG_TXPA_G3 0x33 /* RF TX PA control */ + +/* + * NextGen regs: 8723BU + */ +#define RF6052_REG_UNKNOWN_43 0x43 +#define RF6052_REG_UNKNOWN_55 0x55 +#define RF6052_REG_S0S1 0xb0 +#define RF6052_REG_UNKNOWN_DF 0xdf +#define RF6052_REG_UNKNOWN_ED 0xed +#define RF6052_REG_WE_LUT 0xef -- cgit v1.2.3 From 0d698dec06f828480a95a0c7a91c13bc4ad374e4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:36 -0500 Subject: rtl8xxxu: Handle S0S1 register in lc_calibrate() Newer chips (8723bu/8192eu) has S0S1 settings which needs to be dealt with during LC calibration. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 7 +++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a40ef21016e2..cfd016c5b7ef 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -4835,12 +4835,17 @@ static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv) } /* Start LC calibration */ + if (priv->fops->has_s0s1) + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, 0xdfbe0); val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_MODE_AG); val32 |= 0x08000; rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, val32); msleep(100); + if (priv->fops->has_s0s1) + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_S0S1, 0xdffe0); + /* Restore original parameters */ if (lstf & OFDM_LSTF_MASK) { /* Path-A */ @@ -7359,6 +7364,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, + .has_s0s1 = 1, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, .adda_2t_path_on_a = 0x01c00014, @@ -7393,6 +7399,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, + .has_s0s1 = 1, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, .adda_2t_path_on_a = 0x0fc01616, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 76398de2b110..4a974a698dcc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -855,6 +855,7 @@ struct rtl8xxxu_fileops { int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; + char has_s0s1; u32 adda_1t_init; u32 adda_1t_path_on; u32 adda_2t_path_on_a; -- cgit v1.2.3 From fa0f2d481d178fa2bf44f9488a695ab56d4e78f6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:37 -0500 Subject: rtl8xxxu: Do LC calibration before IQK calibration This matches the flow of the vendor driver for newer hardware, and doesn't seem to cause issues for the older parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index cfd016c5b7ef..62537bd824a5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5803,6 +5803,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0); + rtl8723a_phy_lc_calibrate(priv); + priv->fops->phy_iq_calibrate(priv); /* @@ -5810,8 +5812,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60); - rtl8723a_phy_lc_calibrate(priv); - /* Init BT hw config. */ rtl8xxxu_init_bt(priv); -- cgit v1.2.3 From 60c76cbadb61084139d710ca710959274efdde5b Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:38 -0500 Subject: rtl8xxxu: Remove backing up certain registers, which was never used This was inspired by the vendor driver, but in the end never used for anything. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 14 -------------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 3 --- 2 files changed, 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 62537bd824a5..4579a3ea07c1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5815,20 +5815,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* Init BT hw config. */ rtl8xxxu_init_bt(priv); - /* - * Not sure if we really need to save these parameters, but the - * vendor driver does - */ - val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_HSSI_PARM2); - if (val32 & FPGA0_HSSI_PARM2_CCK_HIGH_PWR) - priv->path_a_hi_power = 1; - - val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE); - priv->path_a_rf_paths = val32 & OFDM_RF_PATH_RX_MASK; - - val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1); - priv->path_a_ig_value = val32 & OFDM0_X_AGC_CORE1_IGI_MASK; - /* Set NAV_UPPER to 30000us */ val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 4a974a698dcc..7b76e11519a6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -785,13 +785,10 @@ struct rtl8xxxu_priv { u32 ep_tx_high_queue:1; u32 ep_tx_normal_queue:1; u32 ep_tx_low_queue:1; - u32 path_a_hi_power:1; - u32 path_a_rf_paths:4; unsigned int pipe_interrupt; unsigned int pipe_in; unsigned int pipe_out[TXDESC_QUEUE_MAX]; u8 out_ep[RTL8XXXU_OUT_ENDPOINTS]; - u8 path_a_ig_value; u8 ep_tx_count; u8 rf_paths; u8 rx_paths; -- cgit v1.2.3 From e6f9a9c3b5235f31434a9de9034ea5eda1532113 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 29 Feb 2016 17:04:39 -0500 Subject: rtl8xxxu: rtl8192eu: Map out EFUSE TX power area TX power values are laid out differently in EFUSE found in RTL8192EU & RTL8188EU devices. TX power indices and differences for each RF path are not interleaved (A, B, A, B), as in other chips, but follow one another (A, B, C, D). 
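The layout change is easiest to see from the offsets. Assuming struct rtl8723au_idx is a single byte holding two packed 4-bit indices (its definition is not part of this patch), each per-path block below is 6 + 5 + 7 = 18 (0x12) bytes, which puts the four blocks back to back at EFUSE offsets 0x10, 0x22, 0x34 and 0x46 and leaves channel_plan at 0xb8 as before. A compile-time sanity check along these lines (purely illustrative, e.g. dropped into rtl8192eu_parse_efuse()) would be:

	/* Sketch only: verify the back-to-back (A, B, C, D) layout. */
	BUILD_BUG_ON(sizeof(struct rtl8192eu_efuse_tx_power) != 0x12);
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, tx_power_index_A) != 0x10);
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, tx_power_index_D) != 0x46);
	BUILD_BUG_ON(offsetof(struct rtl8192eu_efuse, channel_plan) != 0xb8);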
Signed-off-by: Jakub Sitnicki Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 10 ---------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 22 +++++++++++++++++----- 2 files changed, 17 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4579a3ea07c1..4edd9c79f59b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2293,16 +2293,6 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) ether_addr_copy(priv->mac_addr, efuse->mac_addr); - memcpy(priv->cck_tx_power_index_A, efuse->cck_tx_power_index_A, - sizeof(priv->cck_tx_power_index_A)); - memcpy(priv->cck_tx_power_index_B, efuse->cck_tx_power_index_B, - sizeof(priv->cck_tx_power_index_B)); - - memcpy(priv->ht40_1s_tx_power_index_A, efuse->ht40_1s_tx_power_index_A, - sizeof(priv->ht40_1s_tx_power_index_A)); - memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, - sizeof(priv->ht40_1s_tx_power_index_B)); - dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name); dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 7b76e11519a6..6c6bac253473 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -543,14 +543,26 @@ struct rtl8723bu_efuse { u8 res12[0x4]; }; +struct rtl8192eu_efuse_tx_power { + u8 cck_base[6]; + u8 ht40_base[5]; + struct rtl8723au_idx ht20_ofdm_1s_diff; + struct rtl8723au_idx ht40_ht20_2s_diff; + struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */ + struct rtl8723au_idx ht40_ht20_3s_diff; + struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */ + struct rtl8723au_idx ht40_ht20_4s_diff; + struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */ +}; + struct rtl8192eu_efuse { __le16 rtl_id; u8 res0[0x0e]; - u8 cck_tx_power_index_A[3]; /* 0x10 */ - u8 cck_tx_power_index_B[3]; - u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ - u8 ht40_1s_tx_power_index_B[3]; - u8 res1[0x9c]; + struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */ + struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x22 */ + struct rtl8192eu_efuse_tx_power tx_power_index_C; /* 0x34 */ + struct rtl8192eu_efuse_tx_power tx_power_index_D; /* 0x46 */ + u8 res1[0x60]; u8 channel_plan; /* 0xb8 */ u8 xtal_k; u8 thermal_meter; -- cgit v1.2.3 From c3f9506f2374772e579e1c19b86d79f1da71dea2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:40 -0500 Subject: rtl8xxxu: Initial implementation of rtl8723bu_config_channel() This is a first stab of implementing rtl8723bu_config_channel(). For now this will only do 20MHz channels. 
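One detail worth spelling out for the 40 MHz path in the function added below: the primary channel from the chandef is shifted by two channel numbers towards the secondary channel, because the centre of a 40 MHz block sits 10 MHz (two 5 MHz channel steps) away from the primary channel's own centre. The snippet below just restates that hunk with concrete numbers:

	/* Worked example: primary channel 6 (2437 MHz) with HT40+ gives
	 * center_freq1 = 2447 MHz > 2437 MHz, so the secondary channel is
	 * above and the RF is tuned to channel 6 + 2 = 8.  HT40- gives
	 * center_freq1 = 2427 MHz and channel 6 - 2 = 4. */
	channel = hw->conf.chandef.chan->hw_value;
	if (hw->conf.chandef.center_freq1 >
	    hw->conf.chandef.chan->center_freq) {
		sec_ch_above = 1;
		channel += 2;
	} else {
		sec_ch_above = 0;
		channel -= 2;
	}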
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 134 +++++++++++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 14 +++ 3 files changed, 149 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4edd9c79f59b..ada0f040f41a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1777,6 +1777,136 @@ static void rtl8723au_config_channel(struct ieee80211_hw *hw) } } +static void rtl8723bu_config_channel(struct ieee80211_hw *hw) +{ + struct rtl8xxxu_priv *priv = hw->priv; + u32 val32, rsr; + u8 val8, opmode, subchannel; + u16 rf_mode_bw; + bool ht = true; + int sec_ch_above, channel; + int i; + + rf_mode_bw = rtl8xxxu_read16(priv, REG_WMAC_TRXPTCL_CTL); + rf_mode_bw &= ~WMAC_TRXPTCL_CTL_BW_MASK; + rsr = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET); + channel = hw->conf.chandef.chan->hw_value; + +/* Hack */ + subchannel = 0; + + switch (hw->conf.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + ht = false; + case NL80211_CHAN_WIDTH_20: + rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20; + subchannel = 0; + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 &= ~FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE); + val32 &= ~FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT); + val32 &= ~(BIT(30) | BIT(31)); + rtl8xxxu_write32(priv, REG_OFDM0_TX_PSDO_NOISE_WEIGHT, val32); + + break; + case NL80211_CHAN_WIDTH_40: + rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_40; + + if (hw->conf.chandef.center_freq1 > + hw->conf.chandef.chan->center_freq) { + sec_ch_above = 1; + channel += 2; + } else { + sec_ch_above = 0; + channel -= 2; + } + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_RF_MODE); + val32 |= FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA1_RF_MODE); + val32 |= FPGA_RF_MODE; + rtl8xxxu_write32(priv, REG_FPGA1_RF_MODE, val32); + + /* + * Set Control channel to upper or lower. 
These settings + * are required only for 40MHz + */ + val32 = rtl8xxxu_read32(priv, REG_CCK0_SYSTEM); + val32 &= ~CCK0_SIDEBAND; + if (!sec_ch_above) + val32 |= CCK0_SIDEBAND; + rtl8xxxu_write32(priv, REG_CCK0_SYSTEM, val32); + + val32 = rtl8xxxu_read32(priv, REG_OFDM1_LSTF); + val32 &= ~OFDM_LSTF_PRIME_CH_MASK; /* 0xc00 */ + if (sec_ch_above) + val32 |= OFDM_LSTF_PRIME_CH_LOW; + else + val32 |= OFDM_LSTF_PRIME_CH_HIGH; + rtl8xxxu_write32(priv, REG_OFDM1_LSTF, val32); + + val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE); + val32 &= ~(FPGA0_PS_LOWER_CHANNEL | FPGA0_PS_UPPER_CHANNEL); + if (sec_ch_above) + val32 |= FPGA0_PS_UPPER_CHANNEL; + else + val32 |= FPGA0_PS_LOWER_CHANNEL; + rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32); + break; + case NL80211_CHAN_WIDTH_80: + rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_80; + break; + default: + break; + } + + for (i = RF_A; i < priv->rf_paths; i++) { + val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG); + val32 &= ~MODE_AG_CHANNEL_MASK; + val32 |= channel; + rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32); + } + + rtl8xxxu_write16(priv, REG_WMAC_TRXPTCL_CTL, rf_mode_bw); + rtl8xxxu_write8(priv, REG_DATA_SUBCHANNEL, subchannel); + + if (ht) + val8 = 0x0e; + else + val8 = 0x0a; + + rtl8xxxu_write8(priv, REG_SIFS_CCK + 1, val8); + rtl8xxxu_write8(priv, REG_SIFS_OFDM + 1, val8); + + rtl8xxxu_write16(priv, REG_R2T_SIFS, 0x0808); + rtl8xxxu_write16(priv, REG_T2T_SIFS, 0x0a0a); + + for (i = RF_A; i < priv->rf_paths; i++) { + val32 = rtl8xxxu_read_rfreg(priv, i, RF6052_REG_MODE_AG); + val32 &= ~MODE_AG_BW_MASK; + switch(hw->conf.chandef.width) { + case NL80211_CHAN_WIDTH_80: + val32 |= MODE_AG_BW_80MHZ_8723B; + break; + case NL80211_CHAN_WIDTH_40: + val32 |= MODE_AG_BW_40MHZ_8723B; + break; + default: + val32 |= MODE_AG_BW_20MHZ_8723B; + break; + } + rtl8xxxu_write_rfreg(priv, i, RF6052_REG_MODE_AG, val32); + } +} + static void rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { @@ -7321,6 +7451,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .power_on = rtl8723au_power_on, .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, + .config_channel = rtl8723au_config_channel, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -7337,6 +7468,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .llt_init = rtl8xxxu_auto_llt_table, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, + .config_channel = rtl8723bu_config_channel, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -7355,6 +7487,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .power_on = rtl8192cu_power_on, .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, + .config_channel = rtl8723au_config_channel, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -7372,6 +7505,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .power_on = rtl8192eu_power_on, .llt_init = rtl8xxxu_auto_llt_table, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, + .config_channel = rtl8723bu_config_channel, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 6c6bac253473..bce52cdc731a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h 
@@ -861,6 +861,7 @@ struct rtl8xxxu_fileops { int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); + void (*config_channel) (struct ieee80211_hw *hw); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 499210f81845..147deba749e6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -473,6 +473,9 @@ #define REG_FAST_EDCA_CTRL 0x0460 #define REG_RD_RESP_PKT_TH 0x0463 #define REG_INIRTS_RATE_SEL 0x0480 +/* 8723bu */ +#define REG_DATA_SUBCHANNEL 0x0483 +/* 8723au */ #define REG_INIDATA_RATE_SEL 0x0484 #define REG_POWER_STATUS 0x04a4 @@ -658,6 +661,10 @@ #define REG_FWDLY 0x0661 #define REG_RXERR_RPT 0x0664 #define REG_WMAC_TRXPTCL_CTL 0x0668 +#define WMAC_TRXPTCL_CTL_BW_MASK (BIT(7) | BIT(8)) +#define WMAC_TRXPTCL_CTL_BW_20 0 +#define WMAC_TRXPTCL_CTL_BW_40 BIT(7) +#define WMAC_TRXPTCL_CTL_BW_80 BIT(8) /* Security */ #define REG_CAM_CMD 0x0670 @@ -872,6 +879,9 @@ #define REG_OFDM0_RX_IQ_EXT_ANTA 0x0ca0 +/* 8723bu */ +#define REG_OFDM0_TX_PSDO_NOISE_WEIGHT 0x0ce4 + #define REG_OFDM1_LSTF 0x0d00 #define OFDM_LSTF_PRIME_CH_LOW BIT(10) #define OFDM_LSTF_PRIME_CH_HIGH BIT(11) @@ -1030,6 +1040,10 @@ #define RF6052_REG_MODE_AG 0x18 /* RF channel and BW switch */ #define MODE_AG_CHANNEL_MASK 0x3ff #define MODE_AG_CHANNEL_20MHZ BIT(10) +#define MODE_AG_BW_MASK (BIT(10) | BIT(11)) +#define MODE_AG_BW_20MHZ_8723B (BIT(10) | BIT(11)) +#define MODE_AG_BW_40MHZ_8723B BIT(10) +#define MODE_AG_BW_80MHZ_8723B 0 #define RF6052_REG_TOP 0x19 #define RF6052_REG_RX_G1 0x1a -- cgit v1.2.3 From f37e9228aecaa9c92ee0433cd45ca0c545b90ce4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:41 -0500 Subject: rtl8xxxu: Initial rtl8723bu_init_bt() code This should initialize the antennas on the 8723bu, but so far I am still not receiving anything :( More work is needed. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 87 +++++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 15 +++- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 + 3 files changed, 98 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ada0f040f41a..fa8a92a68bc5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5603,10 +5603,87 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e); } -static void rtl8xxxu_init_bt(struct rtl8xxxu_priv *priv) +static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) { - if (!priv->has_bluetooth) - return; + struct h2c_cmd h2c; + u32 val32; + u8 val8; + + /* + * No indication anywhere as to what 0x0790 does. The 2 antenna + * vendor code preserves bits 6-7 here. 
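Most of the BT/antenna setup below goes through the firmware's H2C mailbox. The recurring pattern, shown here out of context with the BT-grant command from this patch as the example, is to zero the whole h2c_cmd union, fill in the sub-command's id and payload, and pass only that sub-command's size to rtl8723a_h2c_cmd():

	struct h2c_cmd h2c;

	/* Set BT grant to low (data = 0), as done in the patch below. */
	memset(&h2c, 0, sizeof(struct h2c_cmd));
	h2c.bt_grant.cmd = H2C_8723B_BT_GRANT;
	h2c.bt_grant.data = 0;
	rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant));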
+ */ + rtl8xxxu_write8(priv, 0x0790, 0x05); + /* + * 0x0778 seems to be related to enabling the number of antennas + * In the vendor driver halbtc8723b2ant_InitHwConfig() sets it + * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01 + */ + rtl8xxxu_write8(priv, 0x0778, 0x01); + + val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG); + val8 |= BIT(5); + rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8); + + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780); + + /* + * Set BT grant to low + */ + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.bt_grant.cmd = H2C_8723B_BT_GRANT; + h2c.bt_grant.data = 0; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant)); + + /* + * WLAN action by PTA + */ + rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c); + + /* + * BT select S0/S1 controlled by WiFi + */ + val8 = rtl8xxxu_read8(priv, 0x0067); + val8 |= BIT(5); + rtl8xxxu_write8(priv, 0x0067, val8); + + val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); + val32 |= BIT(11); + rtl8xxxu_write32(priv, REG_PWR_DATA, val32); + + /* + * Bits 6/7 are marked in/out ... but for what? + */ + rtl8xxxu_write8(priv, 0x0974, 0xff); + + val32 = rtl8xxxu_read32(priv, 0x0944); + val32 |= (BIT(0) | BIT(1)); + rtl8xxxu_write32(priv, 0x0944, val32); + + rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77); + + val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); + val32 &= ~BIT(24); + val32 |= BIT(23); + rtl8xxxu_write32(priv, REG_LEDCFG0, val32); + + /* + * Fix external switch Main->S1, Aux->S0 + */ + val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV; + h2c.ant_sel_rsv.ant_inverse = 1; + h2c.ant_sel_rsv.int_switch_type = 0; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv)); + + /* + * 0x280, 0x00, 0x200, 0x80 - not clear + */ + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x280); } static int rtl8xxxu_init_device(struct ieee80211_hw *hw) @@ -5933,7 +6010,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60); /* Init BT hw config. 
*/ - rtl8xxxu_init_bt(priv); + if (priv->fops->init_bt) + priv->fops->init_bt(priv); /* Set NAV_UPPER to 30000us */ val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); @@ -7469,6 +7547,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, + .init_bt = rtl8723bu_init_bt, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index bce52cdc731a..bcbdc78e9de6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -696,8 +696,9 @@ enum h2c_cmd_8723b { H2C_8723B_BT_MP_OPER = 0x67, H2C_8723B_BT_CONTROL = 0x68, H2C_8723B_BT_WIFI_CTRL = 0x69, - H2C_8723B_BT_FW_PATCH = 0x6A, - H2C_8723B_BT_WLAN_CALIBRATION = 0x6D, + H2C_8723B_BT_FW_PATCH = 0x6a, + H2C_8723B_BT_WLAN_CALIBRATION = 0x6d, + H2C_8723B_BT_GRANT = 0x6e, /* * WOWLAN Class: 100 @@ -743,6 +744,15 @@ struct h2c_cmd { u8 cmd; u8 data; } __packed bt_wlan_calibration; + struct { + u8 cmd; + u8 ant_inverse; + u8 int_switch_type; + } __packed ant_sel_rsv; + struct { + u8 cmd; + u8 data; + } __packed bt_grant; }; }; @@ -862,6 +872,7 @@ struct rtl8xxxu_fileops { void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); void (*config_channel) (struct ieee80211_hw *hw); + void (*init_bt) (struct rtl8xxxu_priv *priv); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 147deba749e6..248f64c7f8e3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -721,6 +721,8 @@ #define REG_BT_CONTROL_8723BU 0x0764 #define BT_CONTROL_BT_GRANT BIT(12) +#define REG_WLAN_ACT_CONTROL_8723B 0x076e + #define REG_FPGA0_RF_MODE 0x0800 #define FPGA_RF_MODE BIT(0) #define FPGA_RF_MODE_JAPAN BIT(1) -- cgit v1.2.3 From 368633ce6853bb2ba678c7cb5bfeb103e7bc56ba Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:42 -0500 Subject: rtl8xxxu: Remove unused variable Remove an unused variable to make the compiler happy. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index fa8a92a68bc5..70543942e8d4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1781,7 +1781,7 @@ static void rtl8723bu_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; u32 val32, rsr; - u8 val8, opmode, subchannel; + u8 val8, subchannel; u16 rf_mode_bw; bool ht = true; int sec_ch_above, channel; -- cgit v1.2.3 From 3ca7b32c9d6da17a2660bc40b24f5dc0cb433cc9 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:43 -0500 Subject: rtl8xxxu: Improve 8723bu init code Implement additional init sequence code for the 8723bu. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 45 ++++++++++++++++++---- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 8 ++++ .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 5 ++- 3 files changed, 49 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 70543942e8d4..91f189a85057 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2984,14 +2984,18 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) * addresses, which is initialized here. Do we need this? */ - val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); - udelay(2); - val8 |= AFE_PLL_320_ENABLE; - rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); - udelay(2); + if (priv->rtlchip == 0x8723b) { + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + } else { + val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); + udelay(2); + val8 |= AFE_PLL_320_ENABLE; + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8); + udelay(2); - rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff); - udelay(2); + rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff); + udelay(2); + } val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; @@ -5603,6 +5607,21 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e); } +static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv, + u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5) +{ + struct h2c_cmd h2c; + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.b_type_dma.cmd = H2C_8723B_B_TYPE_TDMA; + h2c.b_type_dma.data1 = arg1; + h2c.b_type_dma.data2 = arg2; + h2c.b_type_dma.data3 = arg3; + h2c.b_type_dma.data4 = arg4; + h2c.b_type_dma.data5 = arg5; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma)); +} + static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) { struct h2c_cmd h2c; @@ -5683,7 +5702,17 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) /* * 0x280, 0x00, 0x200, 0x80 - not clear */ - rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x280); + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + + /* + * Software control, antenna at WiFi side + */ + rtl8723bu_set_ps_tdma(priv, 0x00, 0x00, 0x00, 0x00, 0x00); + + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a); + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE4, 0x00000003); } static int rtl8xxxu_init_device(struct ieee80211_hw *hw) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index bcbdc78e9de6..c4c0b287127b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -740,6 +740,14 @@ struct h2c_cmd { u8 arg; __le16 mask_lo; } __packed ramask; + struct { + u8 cmd; + u8 data1; + u8 data2; + u8 data3; + u8 data4; + u8 data5; + } __packed b_type_dma; struct { u8 cmd; u8 data; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 248f64c7f8e3..a82c0ba7931d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -709,7 +709,10 @@ #define REG_BCN_PSR_RPT 0x06a8 #define REG_CALB32K_CTRL 0x06ac #define REG_PKT_MON_CTRL 0x06b4 -#define 
REG_BT_COEX_TABLE 0x06c0 +#define REG_BT_COEX_TABLE1 0x06c0 +#define REG_BT_COEX_TABLE2 0x06c4 +#define REG_BT_COEX_TABLE3 0x06c8 +#define REG_BT_COEX_TABLE4 0x06cc #define REG_WMAC_RESP_TXINFO 0x06d8 #define REG_MACID1 0x0700 -- cgit v1.2.3 From 7297f49c3761826242cd8a742f0f78a139d6653a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:44 -0500 Subject: rtl8xxxu: Do not ignore wlan activity on 8723bu The 8723bu is a WiFi/BT combo part. When initializing it for WiFi, make sure to tell it not to ignore WiFi activity. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 +++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++++ 2 files changed, 9 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 91f189a85057..c9cbea7342b7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5704,6 +5704,11 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; + h2c.ignore_wlan.data = 0; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); + /* * Software control, antenna at WiFi side */ diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index c4c0b287127b..5a6b2232e20a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -752,6 +752,10 @@ struct h2c_cmd { u8 cmd; u8 data; } __packed bt_wlan_calibration; + struct { + u8 cmd; + u8 data; + } __packed ignore_wlan; struct { u8 cmd; u8 ant_inverse; -- cgit v1.2.3 From a228a5db4d19b05da7315320e0b4a87ed7c472aa Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:45 -0500 Subject: rtl8xxxu: Set the right type for ps tdma on 8723bu Use the correct type when setting PS TDMA for 8723bu. This matches the vendor driver. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index c9cbea7342b7..0638d4655dfa 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5712,7 +5712,7 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) /* * Software control, antenna at WiFi side */ - rtl8723bu_set_ps_tdma(priv, 0x00, 0x00, 0x00, 0x00, 0x00); + rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a); -- cgit v1.2.3 From a6c80d211cdf0748a8ae4fd1ab41cf43d3ecbd71 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:46 -0500 Subject: rtl8xxxu: Add rtl8723bu (nextgen) rx descriptor definition The nextgen chips use a slightly different RX descriptor format. This adds support for the new format. 
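The frame payload is located the same way as with the older descriptor: the data follows the fixed descriptor, a driver-info area whose length field counts 8-byte units, and a small alignment shift. Roughly, this is how the parsing code in the following patch consumes it:

	/* With rx_desc pointing at the start of the receive buffer, the
	 * frame payload begins after the fixed descriptor, the driver-info
	 * area (drvinfo_sz is counted in 8-byte units) and the shift. */
	unsigned int offset = sizeof(struct rtl8723bu_rx_desc) +
			      rx_desc->drvinfo_sz * 8 + rx_desc->shift;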
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 125 +++++++++++++++++++++++ 1 file changed, 125 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 5a6b2232e20a..a8a4f2b451d1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -197,6 +197,131 @@ struct rtl8xxxu_rx_desc { #endif }; +struct rtl8723bu_rx_desc { +#ifdef __LITTLE_ENDIAN + u32 pktlen:14; + u32 crc32:1; + u32 icverr:1; + u32 drvinfo_sz:4; + u32 security:3; + u32 qos:1; + u32 shift:2; + u32 phy_stats:1; + u32 swdec:1; + u32 ls:1; + u32 fs:1; + u32 eor:1; + u32 own:1; + + u32 macid:7; + u32 dummy1_0:1; + u32 tid:4; + u32 dummy1_1:1; + u32 amsdu:1; + u32 rxid_match:1; + u32 paggr:1; + u32 a1fit:4; /* 16 */ + u32 chkerr:1; + u32 ipver:1; + u32 tcpudp:1; + u32 chkvld:1; + u32 pam:1; + u32 pwr:1; + u32 more_data:1; + u32 more_frag:1; + u32 type:2; + u32 mc:1; + u32 bc:1; + + u32 seq:12; + u32 frag:4; + u32 rx_is_qos:1; /* 16 */ + u32 dummy2_0:1; + u32 wlanhd_iv_len:6; + u32 dummy2_1:4; + u32 rpt_sel:1; + u32 dummy2_2:3; + + u32 rxmcs:7; + u32 dummy3_0:3; + u32 htc:1; + u32 eosp:1; + u32 bssidfit:2; + u32 dummy3_1:2; + u32 usb_agg_pktnum:8; /* 16 */ + u32 dummy3_2:5; + u32 pattern_match:1; + u32 unicast_match:1; + u32 magic_match:1; + + u32 splcp:1; + u32 ldcp:1; + u32 stbc:1; + u32 dummy4_0:1; + u32 bw:2; + u32 dummy4_1:26; +#else + u32 own:1; + u32 eor:1; + u32 fs:1; + u32 ls:1; + u32 swdec:1; + u32 phy_stats:1; + u32 shift:2; + u32 qos:1; + u32 security:3; + u32 drvinfo_sz:4; + u32 icverr:1; + u32 crc32:1; + u32 pktlen:14; + + u32 bc:1; + u32 mc:1; + u32 type:2; + u32 mf:1; + u32 md:1; + u32 pwr:1; + u32 pam:1; + u32 a2fit:4; + u32 a1fit:4; + u32 faggr:1; + u32 paggr:1; + u32 amsdu:1; + u32 hwrsvd:4; + u32 tid:4; + u32 macid:5; + + u32 dummy2_2:3; + u32 rpt_sel:1; + u32 dummy2_1:4; + u32 wlanhd_iv_len:6; + u32 dummy2_0:1; + u32 rx_is_qos:1; + u32 frag:4; /* 16 */ + u32 seq:12; + + u32 magic_match:1; + u32 unicast_match:1; + u32 pattern_match:1; + u32 dummy3_2:5; + u32 usb_agg_pktnum:8; + u32 dummy3_1:2; /* 16 */ + u32 bssidfit:2; + u32 eosp:1; + u32 htc:1; + u32 dummy3_0:3; + u32 rxmcs:7; + + u32 dumm4_1:26; + u32 bw:2; + u32 dummy4_0:1; + u32 stbc:1; + u32 ldcp:1; + u32 splcp:1; +#endif + __le32 tsfl; +}; + struct rtl8xxxu_tx_desc { __le16 pkt_size; u8 pkt_offset; -- cgit v1.2.3 From b18cdfdb67b258c01947e611b6bbb745188eb131 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:47 -0500 Subject: rtl8xxxu: Handle 8723bu style rx descriptors This adds code to parse the new RX descriptor format used by the 8723bu/8192eu parts. 
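The interesting difference from the 8723au parser is the rpt_sel bit, which marks firmware C2H event reports rather than 802.11 frames. The caller-side handling is cut off in this excerpt; a hypothetical dispatch on the returned type, assuming the parsers get wired up through the fileops table like the other per-chip hooks, might look like:

	/* Hypothetical caller in rtl8xxxu_rx_complete(); the fops hook name
	 * and the exact handling of C2H reports are illustrative only. */
	rx_type = priv->fops->parse_rx_desc(priv, skb, rx_status);
	if (rx_type == RX_TYPE_DATA_PKT)
		ieee80211_rx_irqsafe(hw, skb);	/* hand frame to mac80211 */
	else
		dev_kfree_skb(skb);		/* C2H event, not a data frame */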
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 125 +++++++++++++++++------ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 8 ++ 2 files changed, 103 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 0638d4655dfa..8e6d54f18119 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6730,6 +6730,91 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work) } } +static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv, + struct sk_buff *skb, + struct ieee80211_rx_status *rx_status) +{ + struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; + struct rtl8723au_phy_stats *phy_stats; + int drvinfo_sz, desc_shift; + + skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc)); + + phy_stats = (struct rtl8723au_phy_stats *)skb->data; + + drvinfo_sz = rx_desc->drvinfo_sz * 8; + desc_shift = rx_desc->shift; + skb_pull(skb, drvinfo_sz + desc_shift); + + if (rx_desc->phy_stats) + rtl8xxxu_rx_parse_phystats(priv, rx_status, rx_desc, phy_stats); + + rx_status->mactime = le32_to_cpu(rx_desc->tsfl); + rx_status->flag |= RX_FLAG_MACTIME_START; + + if (!rx_desc->swdec) + rx_status->flag |= RX_FLAG_DECRYPTED; + if (rx_desc->crc32) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_desc->bw) + rx_status->flag |= RX_FLAG_40MHZ; + + if (rx_desc->rxht) { + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; + } else { + rx_status->rate_idx = rx_desc->rxmcs; + } + + return RX_TYPE_DATA_PKT; +} + +static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, + struct sk_buff *skb, + struct ieee80211_rx_status *rx_status) +{ + struct rtl8723bu_rx_desc *rx_desc = + (struct rtl8723bu_rx_desc *)skb->data; + struct rtl8723au_phy_stats *phy_stats; + int drvinfo_sz, desc_shift; + int rx_type; + + skb_pull(skb, sizeof(struct rtl8723bu_rx_desc)); + + phy_stats = (struct rtl8723au_phy_stats *)skb->data; + + drvinfo_sz = rx_desc->drvinfo_sz * 8; + desc_shift = rx_desc->shift; + skb_pull(skb, drvinfo_sz + desc_shift); + + rx_status->mactime = le32_to_cpu(rx_desc->tsfl); + rx_status->flag |= RX_FLAG_MACTIME_START; + + if (!rx_desc->swdec) + rx_status->flag |= RX_FLAG_DECRYPTED; + if (rx_desc->crc32) + rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; + if (rx_desc->bw) + rx_status->flag |= RX_FLAG_40MHZ; + + if (rx_desc->rxmcs >= DESC_RATE_MCS0) { + rx_status->flag |= RX_FLAG_HT; + rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; + } else { + rx_status->rate_idx = rx_desc->rxmcs; + } + + if (rx_desc->rpt_sel) { + struct device *dev = &priv->udev->dev; + dev_dbg(dev, "%s: C2H packet\n", __func__); + rx_type = RX_TYPE_C2H; + } else { + rx_type = RX_TYPE_DATA_PKT; + } + + return rx_type; +} + static void rtl8xxxu_rx_complete(struct urb *urb) { struct rtl8xxxu_rx_urb *rx_urb = @@ -6737,54 +6822,30 @@ static void rtl8xxxu_rx_complete(struct urb *urb) struct ieee80211_hw *hw = rx_urb->hw; struct rtl8xxxu_priv *priv = hw->priv; struct sk_buff *skb = (struct sk_buff *)urb->context; - struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data; - struct rtl8723au_phy_stats *phy_stats; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); struct device *dev = &priv->udev->dev; __le32 *_rx_desc_le = (__le32 *)skb->data; u32 *_rx_desc = (u32 *)skb->data; - int drvinfo_sz, desc_shift, i; + int rx_type, i; for (i = 0; 
i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++) _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]); - drvinfo_sz = rx_desc->drvinfo_sz * 8; - desc_shift = rx_desc->shift; skb_put(skb, urb->actual_length); if (urb->status == 0) { - skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc)); - phy_stats = (struct rtl8723au_phy_stats *)skb->data; - - skb_pull(skb, drvinfo_sz + desc_shift); - memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); - if (rx_desc->phy_stats) - rtl8xxxu_rx_parse_phystats(priv, rx_status, - rx_desc, phy_stats); + rx_type = priv->fops->parse_rx_desc(priv, skb, rx_status); rx_status->freq = hw->conf.chandef.chan->center_freq; rx_status->band = hw->conf.chandef.chan->band; - rx_status->mactime = le32_to_cpu(rx_desc->tsfl); - rx_status->flag |= RX_FLAG_MACTIME_START; - - if (!rx_desc->swdec) - rx_status->flag |= RX_FLAG_DECRYPTED; - if (rx_desc->crc32) - rx_status->flag |= RX_FLAG_FAILED_FCS_CRC; - if (rx_desc->bw) - rx_status->flag |= RX_FLAG_40MHZ; - - if (rx_desc->rxht) { - rx_status->flag |= RX_FLAG_HT; - rx_status->rate_idx = rx_desc->rxmcs - DESC_RATE_MCS0; - } else { - rx_status->rate_idx = rx_desc->rxmcs; - } + if (rx_type == RX_TYPE_DATA_PKT) + ieee80211_rx_irqsafe(hw, skb); + else + dev_kfree_skb(skb); - ieee80211_rx_irqsafe(hw, skb); skb = NULL; rx_urb->urb.context = NULL; rtl8xxxu_queue_rx_urb(priv, rx_urb); @@ -7564,6 +7625,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, + .parse_rx_desc = rtl8723au_parse_rx_desc, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -7582,6 +7644,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, .init_bt = rtl8723bu_init_bt, + .parse_rx_desc = rtl8723bu_parse_rx_desc, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -7601,6 +7664,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, + .parse_rx_desc = rtl8723au_parse_rx_desc, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -7619,6 +7683,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .llt_init = rtl8xxxu_auto_llt_table, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, + .parse_rx_desc = rtl8723bu_parse_rx_desc, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index a8a4f2b451d1..4a3afe3c8260 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -61,6 +61,12 @@ #define EFUSE_BT_MAP_LEN_8723A 1024 #define EFUSE_MAX_WORD_UNIT 4 +enum rtl8xxxu_rx_type { + RX_TYPE_DATA_PKT = 0, + RX_TYPE_C2H = 1, + RX_TYPE_ERROR = -1 +}; + struct rtl8xxxu_rx_desc { #ifdef __LITTLE_ENDIAN u32 pktlen:14; @@ -1010,6 +1016,8 @@ struct rtl8xxxu_fileops { void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); void (*config_channel) (struct ieee80211_hw *hw); void (*init_bt) (struct rtl8xxxu_priv *priv); + int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb, + struct ieee80211_rx_status *rx_status); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; -- cgit 
v1.2.3 From b2b43b7837ba2e097bb019ee488ca5f4e0056fa4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:48 -0500 Subject: rtl8xxxu: Initial functionality to handle C2H events for 8723bu The 64 bit mailbox commands also provide a different method for mailbox command responses (C2H events). Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 34 ++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 54 ++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 8e6d54f18119..1f280930ad5c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6815,6 +6815,36 @@ static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, return rx_type; } +static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, + struct sk_buff *skb) +{ + struct rtl8723bu_c2h *c2h = (struct rtl8723bu_c2h *)skb->data; + struct device *dev = &priv->udev->dev; + int len; + + len = skb->len - 2; + + pr_info("%s: C2H ID %02x seq %02x, len %02x %02x\n", __func__, + c2h->id, c2h->seq, len, c2h->bt_info.response_source); + + switch(c2h->id) { + case C2H_8723B_BT_INFO: + if (c2h->bt_info.response_source > + BT_INFO_SRC_8723B_BT_ACTIVE_SEND) + dev_info(dev, "C2H_BT_INFO WiFi only firmware\n"); + else + dev_info(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n"); + + if (c2h->bt_info.bt_has_reset) + dev_info(dev, "BT has been reset\n"); + + break; + default: + pr_info("%s: Unhandled C2H event %02x\n", __func__, c2h->id); + break; + } +} + static void rtl8xxxu_rx_complete(struct urb *urb) { struct rtl8xxxu_rx_urb *rx_urb = @@ -6843,8 +6873,10 @@ static void rtl8xxxu_rx_complete(struct urb *urb) if (rx_type == RX_TYPE_DATA_PKT) ieee80211_rx_irqsafe(hw, skb); - else + else { + rtl8723bu_handle_c2h(priv, skb); dev_kfree_skb(skb); + } skb = NULL; rx_urb->urb.context = NULL; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 4a3afe3c8260..e4d5c4b29e3a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -899,6 +899,60 @@ struct h2c_cmd { }; }; +enum c2h_evt_8723b { + C2H_8723B_DEBUG = 0, + C2H_8723B_TSF = 1, + C2H_8723B_AP_RPT_RSP = 2, + C2H_8723B_CCX_TX_RPT = 3, + C2H_8723B_BT_RSSI = 4, + C2H_8723B_BT_OP_MODE = 5, + C2H_8723B_EXT_RA_RPT = 6, + C2H_8723B_BT_INFO = 9, + C2H_8723B_HW_INFO_EXCH = 10, + C2H_8723B_BT_MP_INFO = 11, + C2H_8723B_FW_DEBUG = 0xff, +}; + +enum bt_info_src_8723b { + BT_INFO_SRC_8723B_WIFI_FW = 0x0, + BT_INFO_SRC_8723B_BT_RSP = 0x1, + BT_INFO_SRC_8723B_BT_ACTIVE_SEND = 0x2, +}; + +struct rtl8723bu_c2h { + u8 id; + u8 seq; + union { + struct { + u8 payload[0]; + } __packed raw; + struct { + u8 response_source:4; + u8 dummy0_0:4; + + u8 bt_info; + + u8 retry_count:4; + u8 dummy2_0:1; + u8 bt_page:1; + u8 tx_rx_mask:1; + u8 dummy2_2:1; + + u8 rssi; + + u8 basic_rate:1; + u8 bt_has_reset:1; + u8 dummy4_1:1;; + u8 ignore_wlan:1; + u8 auto_report:1; + u8 dummy4_2:3; + + u8 a4; + u8 a5; + } __packed bt_info; + }; +}; + struct rtl8xxxu_fileops; struct rtl8xxxu_priv { -- cgit v1.2.3 From 394f1bd314cd08915fee9d5b36a2105dc1a543b7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:49 -0500 Subject: rtl8xxxu: Handle BT register writes and MP_OPER events 8723bu BT 
registers are written via the mailbox interface. Add support for writing these and corresponding C2H event responses. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 34 +++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 59 +++++++++++++++++++++++- 2 files changed, 89 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 1f280930ad5c..ec03e912e82c 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1531,6 +1531,27 @@ error: return retval; } +static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data) +{ + struct h2c_cmd h2c; + int reqnum = 0; + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER; + h2c.bt_mp_oper.operreq = 0 | (reqnum << 4); + h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE; + h2c.bt_mp_oper.data = data; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper)); + + reqnum++; + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER; + h2c.bt_mp_oper.operreq = 0 | (reqnum << 4); + h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE; + h2c.bt_mp_oper.addr = reg; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper)); +} + static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) { u8 val8; @@ -5646,6 +5667,8 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780); + rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */ + /* * Set BT grant to low */ @@ -6824,8 +6847,9 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, len = skb->len - 2; - pr_info("%s: C2H ID %02x seq %02x, len %02x %02x\n", __func__, - c2h->id, c2h->seq, len, c2h->bt_info.response_source); + dev_info(dev, "%s: C2H ID %02x seq %02x, len %02x source %02x\n", + __func__, + c2h->id, c2h->seq, len, c2h->bt_info.response_source); switch(c2h->id) { case C2H_8723B_BT_INFO: @@ -6837,8 +6861,14 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, if (c2h->bt_info.bt_has_reset) dev_info(dev, "BT has been reset\n"); + if (c2h->bt_info.tx_rx_mask) + dev_info(dev, "BT TRx mask\n"); break; + case C2H_8723B_BT_MP_INFO: + dev_info(dev, "C2H_MP_INFO ext ID %02x, status %02x\n", + c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); + break; default: pr_info("%s: Unhandled C2H event %02x\n", __func__, c2h->id); break; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index e4d5c4b29e3a..3e4e355ce2e2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -879,6 +879,13 @@ struct h2c_cmd { u8 data4; u8 data5; } __packed b_type_dma; + struct { + u8 cmd; + u8 operreq; + u8 opcode; + u8 data; + u8 addr; + } __packed bt_mp_oper; struct { u8 cmd; u8 data; @@ -908,8 +915,8 @@ enum c2h_evt_8723b { C2H_8723B_BT_OP_MODE = 5, C2H_8723B_EXT_RA_RPT = 6, C2H_8723B_BT_INFO = 9, - C2H_8723B_HW_INFO_EXCH = 10, - C2H_8723B_BT_MP_INFO = 11, + C2H_8723B_HW_INFO_EXCH = 0x0a, + C2H_8723B_BT_MP_INFO = 0x0b, C2H_8723B_FW_DEBUG = 0xff, }; @@ -919,6 +926,46 @@ enum bt_info_src_8723b { BT_INFO_SRC_8723B_BT_ACTIVE_SEND = 0x2, }; +enum bt_mp_oper_opcode_8723b { + BT_MP_OP_GET_BT_VERSION = 0x00, + BT_MP_OP_RESET = 0x01, + BT_MP_OP_TEST_CTRL = 0x02, + BT_MP_OP_SET_BT_MODE = 0x03, + 
BT_MP_OP_SET_CHNL_TX_GAIN = 0x04, + BT_MP_OP_SET_PKT_TYPE_LEN = 0x05, + BT_MP_OP_SET_PKT_CNT_L_PL_TYPE = 0x06, + BT_MP_OP_SET_PKT_CNT_H_PKT_INTV = 0x07, + BT_MP_OP_SET_PKT_HEADER = 0x08, + BT_MP_OP_SET_WHITENCOEFF = 0x09, + BT_MP_OP_SET_BD_ADDR_L = 0x0a, + BT_MP_OP_SET_BD_ADDR_H = 0x0b, + BT_MP_OP_WRITE_REG_ADDR = 0x0c, + BT_MP_OP_WRITE_REG_VALUE = 0x0d, + BT_MP_OP_GET_BT_STATUS = 0x0e, + BT_MP_OP_GET_BD_ADDR_L = 0x0f, + BT_MP_OP_GET_BD_ADDR_H = 0x10, + BT_MP_OP_READ_REG = 0x11, + BT_MP_OP_SET_TARGET_BD_ADDR_L = 0x12, + BT_MP_OP_SET_TARGET_BD_ADDR_H = 0x13, + BT_MP_OP_SET_TX_POWER_CALIBRATION = 0x14, + BT_MP_OP_GET_RX_PKT_CNT_L = 0x15, + BT_MP_OP_GET_RX_PKT_CNT_H = 0x16, + BT_MP_OP_GET_RX_ERROR_BITS_L = 0x17, + BT_MP_OP_GET_RX_ERROR_BITS_H = 0x18, + BT_MP_OP_GET_RSSI = 0x19, + BT_MP_OP_GET_CFO_HDR_QUALITY_L = 0x1a, + BT_MP_OP_GET_CFO_HDR_QUALITY_H = 0x1b, + BT_MP_OP_GET_TARGET_BD_ADDR_L = 0x1c, + BT_MP_OP_GET_TARGET_BD_ADDR_H = 0x1d, + BT_MP_OP_GET_AFH_MAP_L = 0x1e, + BT_MP_OP_GET_AFH_MAP_M = 0x1f, + BT_MP_OP_GET_AFH_MAP_H = 0x20, + BT_MP_OP_GET_AFH_STATUS = 0x21, + BT_MP_OP_SET_TRACKING_INTERVAL = 0x22, + BT_MP_OP_SET_THERMAL_METER = 0x23, + BT_MP_OP_ENABLE_CFO_TRACKING = 0x24, +}; + struct rtl8723bu_c2h { u8 id; u8 seq; @@ -926,6 +973,14 @@ struct rtl8723bu_c2h { struct { u8 payload[0]; } __packed raw; + struct { + u8 ext_id; + u8 status:4; + u8 retlen:4; + u8 opcode_ver:4; + u8 req_num:4; + u8 payload[2]; + } __packed bt_mp_info; struct { u8 response_source:4; u8 dummy0_0:4; -- cgit v1.2.3 From 6b9eae0129f43b8e691cec91a99601b0829fb841 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:50 -0500 Subject: rtl8xxxu: Issue BT_INFO command Issue a BT_INFO command to verify the status of BT/WiFi settings. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 18 +++++++++++------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++++ 2 files changed, 15 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ec03e912e82c..9b5b768ee33b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5727,20 +5727,25 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) */ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); - memset(&h2c, 0, sizeof(struct h2c_cmd)); - h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; - h2c.ignore_wlan.data = 0; - rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); - /* * Software control, antenna at WiFi side */ rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00); + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.bt_info.cmd = H2C_8723B_BT_INFO; + h2c.bt_info.data = BIT(0); + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info)); + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE4, 0x00000003); + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; + h2c.ignore_wlan.data = 0; + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); } static int rtl8xxxu_init_device(struct ieee80211_hw *hw) @@ -6847,8 +6852,7 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, len = skb->len - 2; - dev_info(dev, "%s: C2H ID %02x seq %02x, len %02x source %02x\n", - __func__, + dev_info(dev, "C2H ID %02x seq 
%02x, len %02x source %02x\n", c2h->id, c2h->seq, len, c2h->bt_info.response_source); switch(c2h->id) { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 3e4e355ce2e2..ea0b28a9a519 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -879,6 +879,10 @@ struct h2c_cmd { u8 data4; u8 data5; } __packed b_type_dma; + struct { + u8 cmd; + u8 data; + } __packed bt_info; struct { u8 cmd; u8 operreq; -- cgit v1.2.3 From 04313eb4f2225cb5dba2fffdb0f5f1abd71e611d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:51 -0500 Subject: rtl8xxxu: Do not set REG_AFE_XTAL_CTRL on 8723bu The 8723bu does not like REG_AFE_XTAL_CTRL being set, so skip this for now, to match the vendor driver. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 9b5b768ee33b..a41f7f0e5b0b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2998,6 +2998,7 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv, static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) { u8 val8, ldoa15, ldov12d, lpldo, ldohci12; + u16 val16; u32 val32; /* @@ -3018,16 +3019,18 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) udelay(2); } - val8 = rtl8xxxu_read8(priv, REG_SYS_FUNC); - val8 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; - rtl8xxxu_write8(priv, REG_SYS_FUNC, val8); + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); - /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ - val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); - val32 &= ~AFE_XTAL_RF_GATE; - if (priv->has_bluetooth) - val32 &= ~AFE_XTAL_BT_GATE; - rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32); + if (priv->rtlchip != 0x8723b) { + /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ + val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL); + val32 &= ~AFE_XTAL_RF_GATE; + if (priv->has_bluetooth) + val32 &= ~AFE_XTAL_BT_GATE; + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32); + } /* 6. 0x1f[7:0] = 0x07 */ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB; -- cgit v1.2.3 From 42836db1f609b66fdb1b307243fa01451748ab68 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:52 -0500 Subject: rtl8xxxu: Implement 8723bu power on sequence This implements the 8723bu specific power on sequence as it is different from that of the 8723au chips. 
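The power-on sequence below is mostly read-modify-write plus bounded polling: set a bit in APS_FSMCO, then spin until the hardware clears it again or a retry budget runs out. A minimal user-space sketch of that pattern, with the register replaced by a plain variable and the timings, retry limit and bit position invented:

#include <stdint.h>
#include <stdio.h>

#define MAC_ENABLE	(1u << 8)
#define MAX_POLL	100

static uint32_t fake_reg;
static int busy_reads = 3;	/* fake hardware stays busy for the first three polls */

static uint32_t read32(void)
{
	if ((fake_reg & MAC_ENABLE) && busy_reads-- == 0)
		fake_reg &= ~MAC_ENABLE;
	return fake_reg;
}

static void write32(uint32_t val)
{
	fake_reg = val;
}

static int power_on_step(void)
{
	int count;

	/* kick the state machine by setting the enable bit ... */
	write32(read32() | MAC_ENABLE);

	/* ... then poll until the hardware clears it again, or give up */
	for (count = MAX_POLL; count; count--) {
		if (!(read32() & MAC_ENABLE))
			return 0;
		/* the driver udelay(10)s between polls */
	}
	return -1;		/* the driver returns -EBUSY here */
}

int main(void)
{
	printf("power_on_step() = %d\n", power_on_step());
	return 0;
}
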
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 183 ++++++++++++++++++++- .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 11 ++ 2 files changed, 189 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index a41f7f0e5b0b..1f30b3bb5daf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2933,10 +2933,6 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) val32 &= 0xffffff00; val32 |= 0x77; rtl8xxxu_write32(priv, 0x0930, val32); - - val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); - val32 |= BIT(11); - rtl8xxxu_write32(priv, REG_PWR_DATA, val32); } static int @@ -5359,6 +5355,127 @@ exit: return ret; } +static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u32 val32; + int count, ret = 0; + + /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface */ + val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); + val8 |= LDOA15_ENABLE; + rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); + + /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/ + val8 = rtl8xxxu_read8(priv, 0x0067); + val8 &= ~BIT(4); + rtl8xxxu_write8(priv, 0x0067, val8); + + mdelay(1); + + /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */ + val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); + val8 &= ~SYS_ISO_ANALOG_IPS; + rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); + + /* Disable SW LPS 0x04[10]= 0 */ + val32 = rtl8xxxu_read8(priv, REG_APS_FSMCO); + val32 &= ~APS_FSMCO_SW_LPS; + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + /* Wait until 0x04[17] = 1 power ready */ + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + if (val32 & BIT(17)) + break; + + udelay(10); + } + + if (!count) { + ret = -EBUSY; + goto exit; + } + + /* We should be able to optimize the following three entries into one */ + + /* Release WLON reset 0x04[16]= 1*/ + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + val32 |= APS_FSMCO_WLON_RESET; + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + /* Disable HWPDN 0x04[15]= 0*/ + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + val32 &= ~APS_FSMCO_HW_POWERDOWN; + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + /* Disable WL suspend*/ + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE); + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + /* Set, then poll until 0 */ + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + val32 |= APS_FSMCO_MAC_ENABLE; + rtl8xxxu_write32(priv, REG_APS_FSMCO, val32); + + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO); + if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) { + ret = 0; + break; + } + udelay(10); + } + + if (!count) { + ret = -EBUSY; + goto exit; + } + + /* Enable WL control XTAL setting */ + val8 = rtl8xxxu_read8(priv, REG_AFE_MISC); + val8 |= AFE_MISC_WL_XTAL_CTRL; + rtl8xxxu_write8(priv, REG_AFE_MISC, val8); + + /* Enable falling edge triggering interrupt */ + val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1); + val8 |= BIT(1); + rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8); + + /* Enable GPIO9 interrupt mode */ + val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1); + val8 |= BIT(1); + rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8); + + /* Enable GPIO9 input mode */ + val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2); + val8 &= ~BIT(1); + 
rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8); + + /* Enable HSISR GPIO[C:0] interrupt */ + val8 = rtl8xxxu_read8(priv, REG_HSIMR); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_HSIMR, val8); + + /* Enable HSISR GPIO9 interrupt */ + val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2); + val8 |= BIT(1); + rtl8xxxu_write8(priv, REG_HSIMR + 2, val8); + + val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL); + val8 |= MULTI_WIFI_HW_ROF_EN; + rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8); + + /* For GPIO9 internal pull high setting BIT(14) */ + val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1); + val8 |= BIT(6); + rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8); + +exit: + return ret; +} + static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv) { u8 val8; @@ -5430,6 +5547,62 @@ exit: return ret; } +static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + u32 val32; + int ret; + + rtl8723a_disabled_to_emu(priv); + + ret = rtl8723b_emu_to_active(priv); + if (ret) + goto exit; + + /* + * Enable MAC DMA/WMAC/SCHEDULE/SEC block + * Set CR bit10 to enable 32k calibration. + */ + val16 = rtl8xxxu_read16(priv, REG_CR); + val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE | + CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | + CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE | + CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE | + CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE); + rtl8xxxu_write16(priv, REG_CR, val16); + + /* + * BT coexist power on settings. This is identical for 1 and 2 + * antenna parts. + */ + rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20); + + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + + rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18); + rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04); + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); + /* Antenna inverse */ + rtl8xxxu_write8(priv, 0xfe08, 0x01); + + val16 = rtl8xxxu_read16(priv, REG_PWR_DATA); + val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; + rtl8xxxu_write16(priv, REG_PWR_DATA, val16); + + val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); + val32 |= LEDCFG0_DPDT_SELECT; + rtl8xxxu_write32(priv, REG_LEDCFG0, val32); + + val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); + val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA; + rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); +exit: + return ret; +} + #ifdef CONFIG_RTL8XXXU_UNTESTED static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv) @@ -7707,7 +7880,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { static struct rtl8xxxu_fileops rtl8723bu_fops = { .parse_efuse = rtl8723bu_parse_efuse, .load_firmware = rtl8723bu_load_firmware, - .power_on = rtl8723au_power_on, + .power_on = rtl8723bu_power_on, .llt_init = rtl8xxxu_auto_llt_table, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index a82c0ba7931d..5250388b0275 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -70,6 +70,8 @@ #define REG_EE_VPD 0x000c #define REG_AFE_MISC 0x0010 +#define AFE_MISC_WL_XTAL_CTRL BIT(6) + #define REG_SPS0_CTRL 0x0011 #define REG_SPS_OCP_CFG 0x0018 #define REG_8192E_LDOV12_CTRL 0x0014 @@ -133,6 +135,8 @@ #define EFUSE_ACCESS_DISABLE 0x00 /* RTL8723 only */ #define REG_PWR_DATA 0x0038 +#define PWR_DATA_EEPRPAD_RFE_CTRL_EN BIT(11) + #define REG_CAL_TIMER 
0x003c #define REG_ACLK_MON 0x003e #define REG_GPIO_MUXCFG 0x0040 @@ -140,7 +144,10 @@ #define REG_MAC_PINMUX_CFG 0x0043 #define REG_GPIO_PIN_CTRL 0x0044 #define REG_GPIO_INTM 0x0048 +#define GPIO_INTM_EDGE_TRIG_IRQ BIT(9) + #define REG_LEDCFG0 0x004c +#define LEDCFG0_DPDT_SELECT BIT(23) #define REG_LEDCFG1 0x004d #define REG_LEDCFG2 0x004e #define LEDCFG2_DPDT_SELECT BIT(7) @@ -154,9 +161,13 @@ #define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS Multi-Function GPIO Select. */ #define REG_GPIO_IO_SEL_2 0x0062 +#define GPIO_IO_SEL_2_GPIO09_INPUT BIT(1) +#define GPIO_IO_SEL_2_GPIO09_IRQ BIT(9) /* RTL8723B */ #define REG_PAD_CTRL1 0x0064 +#define PAD_CTRL1_SW_DPDT_SEL_DATA BIT(0) + /* RTL8723 only WIFI/BT/GPS Multi-Function control source. */ #define REG_MULTI_FUNC_CTRL 0x0068 -- cgit v1.2.3 From 79fb5fe9edb78bd28969dd0f6bacc8c12550c4cc Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:53 -0500 Subject: rtl8xxxu: Setup LLT before downloading firmware This matches the order of the 8723bu vendor driver Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 29 ++++++++++++++++-------- 1 file changed, 20 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 1f30b3bb5daf..e836382e41c4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5955,6 +5955,26 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); if (!macpower) { + ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM); + if (ret) { + dev_warn(dev, "%s: LLT table init failed\n", __func__); + goto exit; + } + + /* + * Presumably this is for 8188EU as well + * Enable TX report and TX report timer + */ + if (priv->rtlchip == 0x8723bu) { + val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); + val8 |= BIT(1); + rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); + /* Set MAX RPT MACID */ + rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02); + /* TX report Timer. Unit: 32us */ + rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0); + } + if (priv->ep_tx_normal_queue) val8 = TX_PAGE_NUM_NORM_PQ; else @@ -5996,15 +6016,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - dev_dbg(dev, "%s: macpower %i\n", __func__, macpower); - if (!macpower) { - ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM); - if (ret) { - dev_warn(dev, "%s: LLT table init failed\n", __func__); - goto exit; - } - } - /* Fix USB interface interference issue */ if (priv->rtlchip == 0x8723a) { rtl8xxxu_write8(priv, 0xfe40, 0xe0); -- cgit v1.2.3 From 8baf670b8928893bacf95a67c86ec2d6c42263f2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:54 -0500 Subject: rtl8xxxu: Additional fixes for 8723bu Additional tweaks to further map the init sequence for the 8723bu to that of the vendor driver. 
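The tweaks below all have the same shape: read a register, OR in (or mask out) a chip-dependent set of bits, and write it back, with the extra bits gated on priv->rtlchip. A small stand-alone sketch of that shape; the bit positions and the fake register file are invented, only the structure mirrors the patch:

#include <stdint.h>
#include <stdio.h>

#define REG_SYS_FUNC		0x02
#define SYS_FUNC_BBRSTB		(1u << 0)	/* illustrative bit positions */
#define SYS_FUNC_BB_GLB_RSTN	(1u << 1)
#define SYS_FUNC_DIO_RF		(1u << 2)

static uint16_t regs[16];	/* fake register file */

static uint16_t read16(int reg)			{ return regs[reg]; }
static void write16(int reg, uint16_t val)	{ regs[reg] = val; }

static void init_phy_bb(unsigned int rtlchip)
{
	uint16_t val16 = read16(REG_SYS_FUNC);

	val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
	if (rtlchip == 0x8723b)		/* 8723bu also wants the RF digital IO path */
		val16 |= SYS_FUNC_DIO_RF;
	write16(REG_SYS_FUNC, val16);
}

int main(void)
{
	init_phy_bb(0x8723b);
	printf("SYS_FUNC = 0x%04x\n", read16(REG_SYS_FUNC));
	return 0;
}
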
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 25 +++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e836382e41c4..e879fe28a99b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2957,7 +2957,8 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array) } } - rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a); + if (priv->rtlchip != 0x8723b) + rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a); return 0; } @@ -3003,6 +3004,11 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) */ if (priv->rtlchip == 0x8723b) { + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | + SYS_FUNC_DIO_RF; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00); } else { val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL); @@ -3013,11 +3019,11 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff); udelay(2); - } - val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); - val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; - rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + } if (priv->rtlchip != 0x8723b) { /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */ @@ -3036,9 +3042,14 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table); else if (priv->tx_paths == 2) rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table); - else if (priv->rtlchip == 0x8723b) + else if (priv->rtlchip == 0x8723b) { + /* + * Why? + */ + rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3); + rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80); rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table); - else + } else rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table); -- cgit v1.2.3 From 4ef22eb933c21f307413f9fc402f83e7384cf726 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:55 -0500 Subject: rtl8xxxu: Handle XTAL_K value in efuse specific location Retrieve the XTAL_K value in the parse_efuse() functions as it's location various on a per device basis. For parts that do not provide an XTAL_K value, skip setting it. 
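The cached xtalk value ends up in the crystal-cap field of REG_MAC_PHY_CTRL; the patch below keeps the existing bit arithmetic and only changes where the 6-bit value comes from. A stand-alone sketch of just that arithmetic, with arbitrary sample values in main():

#include <stdint.h>
#include <stdio.h>

/* duplicate the 6-bit XTAL_K value into bits [17:12] and [23:18] */
static uint32_t apply_xtalk(uint32_t mac_phy_ctrl, uint8_t xtal_k)
{
	uint32_t k = xtal_k & 0x3f;

	mac_phy_ctrl &= 0xff000fff;		/* clear both crystal-cap fields */
	mac_phy_ctrl |= (k | (k << 6)) << 12;
	return mac_phy_ctrl;
}

int main(void)
{
	uint32_t before = 0x12345678;		/* arbitrary starting value */

	printf("0x%08x -> 0x%08x\n", before, apply_xtalk(before, 0x20));
	return 0;
}
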
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 15 ++++++++++++--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 ++ 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e879fe28a99b..7f69a1b6980d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2314,6 +2314,10 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) efuse->ht20_max_power_offset, sizeof(priv->ht20_max_power_offset)); + if (priv->efuse_wifi.efuse8723.version >= 0x01) { + priv->has_xtalk = 1; + priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f; + } dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.41s\n", @@ -2340,6 +2344,9 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, sizeof(priv->ht40_1s_tx_power_index_B)); + priv->has_xtalk = 1; + priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f; + dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name); @@ -2444,6 +2451,9 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) ether_addr_copy(priv->mac_addr, efuse->mac_addr); + priv->has_xtalk = 1; + priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f; + dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name); dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial); @@ -3122,11 +3132,10 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) else rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table); - if ((priv->rtlchip == 0x8723a || priv->rtlchip == 0x8723b) && - priv->efuse_wifi.efuse8723.version >= 0x01) { + if (priv->has_xtalk) { val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL); - val8 = priv->efuse_wifi.efuse8723.xtal_k & 0x3f; + val8 = priv->xtalk; val32 &= 0xff000fff; val32 |= ((val8 | (val8 << 6)) << 12); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index ea0b28a9a519..2250e1b17d60 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1063,6 +1063,8 @@ struct rtl8xxxu_priv { u32 ep_tx_high_queue:1; u32 ep_tx_normal_queue:1; u32 ep_tx_low_queue:1; + u32 has_xtalk:1; + u8 xtalk; unsigned int pipe_interrupt; unsigned int pipe_in; unsigned int pipe_out[TXDESC_QUEUE_MAX]; -- cgit v1.2.3 From a0e262bcbe71892c88abb12b6d409553863efacc Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:56 -0500 Subject: rtl8xxxu: Another 8723bu patch for rtl8xxxu_init_phy_bb() This function is going to need to be split up into chip specific variants. 
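The block being skipped on 8723bu packs four LDO settings into a single 32-bit write to REG_LDOA15_CTRL. A stand-alone sketch of that packing; the formula matches the hunk below, while the individual bit definitions are illustrative stand-ins for the ones in rtl8xxxu_regs.h:

#include <stdint.h>
#include <stdio.h>

#define LDOA15_ENABLE		(1u << 0)	/* stand-in values */
#define LDOA15_OBUF		(1u << 1)
#define LDOV12D_ENABLE		(1u << 0)
#define LDOV12D_VADJ_SHIFT	4

static uint32_t pack_ldo_word(void)
{
	uint8_t ldoa15   = LDOA15_ENABLE | LDOA15_OBUF;
	uint8_t ldov12d  = LDOV12D_ENABLE | (1u << 2) | (2u << LDOV12D_VADJ_SHIFT);
	uint8_t ldohci12 = 0x57;
	uint8_t lpldo    = 1;

	/* one 32-bit write carries all four settings */
	return ((uint32_t)lpldo << 24) | ((uint32_t)ldohci12 << 16) |
	       ((uint32_t)ldov12d << 8) | ldoa15;
}

int main(void)
{
	printf("LDOA15_CTRL = 0x%08x\n", pack_ldo_word());
	return 0;
}
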
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 7f69a1b6980d..3f742b74f293 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -3142,13 +3142,16 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32); } - ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; - ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); - ldohci12 = 0x57; - lpldo = 1; - val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15; + if (priv->rtlchip != 0x8723bu) { + ldoa15 = LDOA15_ENABLE | LDOA15_OBUF; + ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT); + ldohci12 = 0x57; + lpldo = 1; + val32 = (lpldo << 24) | (ldohci12 << 16) | + (ldov12d << 8) | ldoa15; - rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32); + rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32); + } return 0; } -- cgit v1.2.3 From 360157eb25d4088c28ff114a613d2c95b61e93f4 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:57 -0500 Subject: rtl8xxxu: Another 8723bu magic register set during init No indication of what register 0xa3 does anywhere in the vendor source. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3f742b74f293..f5f38a51486f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5996,6 +5996,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02); /* TX report Timer. Unit: 32us */ rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0); + + /* tmp ps ? */ + val8 = rtl8xxxu_read8(priv, 0xa3); + val8 &= 0xf8; + rtl8xxxu_write8(priv, 0xa3, val8); } if (priv->ep_tx_normal_queue) -- cgit v1.2.3 From 3a4be6a092c89ad7a94b147c3529d6626e5a59ff Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:58 -0500 Subject: rtl8xxxu: Init H2C command register for 8723bu In addition make register read/write flow match closer to vendor driver flow. This is mainly to be able to compare the register write log with the vendor driver, and can be optimized later once 8723bu support is working. 
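Writing REG_HMTFR readies the host-to-card mailbox; what then travels through it are the small packed h2c_cmd payloads used throughout this series. A stand-alone sketch of such a payload being filled and dumped; the command ID is a placeholder and the send helper stands in for rtl8723a_h2c_cmd(), neither is the driver's real value or code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* two-byte payload, mirroring the __packed bt_info entry of struct h2c_cmd */
struct bt_info_cmd {
	uint8_t cmd;	/* H2C command ID */
	uint8_t data;	/* BIT(0): request a BT_INFO report from the firmware */
};

/* stand-in for rtl8723a_h2c_cmd(): just hexdump what would hit the mailbox */
static void send_h2c(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	size_t i;

	printf("h2c:");
	for (i = 0; i < len; i++)
		printf(" %02x", p[i]);
	printf("\n");
}

int main(void)
{
	struct bt_info_cmd h2c;

	memset(&h2c, 0, sizeof(h2c));
	h2c.cmd = 0x61;		/* placeholder, not the real H2C_8723B_BT_INFO ID */
	h2c.data = 1u << 0;
	send_h2c(&h2c, sizeof(h2c));
	return 0;
}
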
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index f5f38a51486f..1d0869b55c64 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2685,6 +2685,11 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) goto exit; } + /* + * Init H2C command + */ + if (priv->rtlchip == 0x8723b) + rtl8xxxu_write8(priv, REG_HMTFR, 0x0f); exit: return ret; } @@ -2927,14 +2932,20 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 &= ~BIT(4); + rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); + + val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG); val32 |= BIT(3); rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32); val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); - val32 &= ~BIT(23); val32 |= BIT(24); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); + val32 = rtl8xxxu_read32(priv, REG_LEDCFG0); + val32 &= ~BIT(23); + rtl8xxxu_write32(priv, REG_LEDCFG0, val32); + val32 = rtl8xxxu_read32(priv, 0x0944); val32 |= (BIT(0) | BIT(1)); rtl8xxxu_write32(priv, 0x0944, val32); @@ -2943,6 +2954,10 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) val32 &= 0xffffff00; val32 |= 0x77; rtl8xxxu_write32(priv, 0x0930, val32); + + val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); + val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; + rtl8xxxu_write32(priv, REG_PWR_DATA, val32); } static int @@ -6079,7 +6094,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, 0xfe42, 0x80); } - if (priv->rtlchip == 0x8192e || priv->rtlchip == 0x8723b) { + if (priv->rtlchip == 0x8192e) { rtl8xxxu_write32(priv, REG_HIMR0, 0x00); rtl8xxxu_write32(priv, REG_HIMR1, 0x00); } -- cgit v1.2.3 From f30ed675545dd4b9f32d3d674054b6024b413f1d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:04:59 -0500 Subject: rtl8xxxu: 80M spur hack is for 8723au only Only apply the 80M spur hack for 8723au parts. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 1d0869b55c64..9ba4cadd1549 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6151,11 +6151,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - /* Reduce 80M spur */ - rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); - rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + if (priv->rtlchip == 0x8723a) { /* Reduce 80M spur */ + rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); + rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + } /* RFSW Control - clear bit 14 ?? 
*/ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); -- cgit v1.2.3 From 1f1b20f11ab4ce38a338b278bb6b538bbfdc831a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:00 -0500 Subject: rtl8xxxu: Do queue init in same order as 8723bu vendor driver Reorganize the init sequence in order to be able to compare to the 8723bu vendor driver's init sequence. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 68 +++++++++++++----------- 1 file changed, 37 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 9ba4cadd1549..27fb89fe99ec 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6017,32 +6017,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) val8 &= 0xf8; rtl8xxxu_write8(priv, 0xa3, val8); } - - if (priv->ep_tx_normal_queue) - val8 = TX_PAGE_NUM_NORM_PQ; - else - val8 = 0; - - rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); - - val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; - - if (priv->ep_tx_high_queue) - val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); - if (priv->ep_tx_low_queue) - val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); - - rtl8xxxu_write32(priv, REG_RQPN, val32); - - /* - * Set TX buffer boundary - */ - val8 = TX_TOTAL_PAGE_NUM + 1; - rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); - rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8); - rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8); - rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8); - rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); } ret = rtl8xxxu_download_firmware(priv); @@ -6054,11 +6028,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - ret = rtl8xxxu_init_queue_priority(priv); - dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); - if (ret) - goto exit; - /* Fix USB interface interference issue */ if (priv->rtlchip == 0x8723a) { rtl8xxxu_write8(priv, 0xfe40, 0xe0); @@ -6158,6 +6127,43 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); } + if (!macpower){ + if (priv->ep_tx_normal_queue) + val8 = TX_PAGE_NUM_NORM_PQ; + else + val8 = 0; + + rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8); + + val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD; + + if (priv->ep_tx_high_queue) + val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT); + if (priv->ep_tx_low_queue) + val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT); + + rtl8xxxu_write32(priv, REG_RQPN, val32); + + /* + * Set TX buffer boundary + */ + val8 = TX_TOTAL_PAGE_NUM + 1; + + if (priv->rtlchip == 0x8723b) + val8 -= 1; + + rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8); + rtl8xxxu_write8(priv, REG_TXPKTBUF_MGQ_BDNY, val8); + rtl8xxxu_write8(priv, REG_TXPKTBUF_WMAC_LBK_BF_HD, val8); + rtl8xxxu_write8(priv, REG_TRXFF_BNDY, val8); + rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8); + } + + ret = rtl8xxxu_init_queue_priority(priv); + dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret); + if (ret) + goto exit; + /* RFSW Control - clear bit 14 ?? 
*/ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); /* 0x07000760 */ -- cgit v1.2.3 From b87212ceceef655dae563efd2df13cb59dbd1711 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:01 -0500 Subject: rtl8xxxu: Do not set FPGA0_TX_INFO for 8723bu and use a larger PBP page size The vendor driver does not set FPGA0_TX_INFO here. In additiona the 8723bu can handler a larger PBP page size. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 27fb89fe99ec..bb43937788ce 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6165,7 +6165,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) goto exit; /* RFSW Control - clear bit 14 ?? */ - rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); + if (priv->rtlchip != 0x8723b) + rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003); /* 0x07000760 */ val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE | @@ -6185,8 +6186,12 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Transfer page size is always 128 */ - val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) | - (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT); + if (priv->rtlchip == 0x8723b) + val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) | + (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT); + else + val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) | + (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT); rtl8xxxu_write8(priv, REG_PBP, val8); /* -- cgit v1.2.3 From fadfa041546924c28ca2a3bf1ed8fd25686e8cf5 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:02 -0500 Subject: rtl8xxxu: Set RX boundary for 8723bu Set the correct TRXFF boundary for 8723bu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index bb43937788ce..6611128532f4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6182,7 +6182,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Set RX page boundary */ - rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); + if (priv->rtlchip == 0x8723b) + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f); + else + rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff); /* * Transfer page size is always 128 */ -- cgit v1.2.3 From c36906044104813304a9c40252f9b2a573ccd187 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:03 -0500 Subject: rtl8xxxu: Initialize burst parameters for 8723bu Implement burst parameter sequence for 8723bu parts. Eventually this should be moved into device specific sections. 
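The burst setup below is again pure bit surgery on REG_RXDMA_PRO_8723B: clear the two burst-size bits, select 512-byte USB bursts, and turn on the remaining DMA-mode bits. A stand-alone sketch of that computation; the starting value in main() is arbitrary:

#include <stdint.h>
#include <stdio.h>

/* REG_RXDMA_PRO_8723B: pick 512-byte USB bursts and enable the DMA mode bits */
static uint8_t rxdma_pro_usb_hs(uint8_t val8)
{
	val8 &= (uint8_t)~((1u << 4) | (1u << 5));	/* clear the burst-size field */
	val8 |= (1u << 4);				/* 512-byte bursts (USB high speed) */
	val8 |= (1u << 1) | (1u << 2) | (1u << 3);
	return val8;
}

int main(void)
{
	printf("RXDMA_PRO: 0x%02x -> 0x%02x\n", 0xa5, rxdma_pro_usb_hs(0xa5));
	return 0;
}
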
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 33 ++++++++++++++++++++++ .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 5 +++- 2 files changed, 37 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6611128532f4..631482555bee 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6278,6 +6278,39 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, REG_BEACON_DMA_TIME, BEACON_DMA_ATIME_INT_TIME); rtl8xxxu_write16(priv, REG_BEACON_TCFG, 0x660F); + /* + * Initialize burst parameters + */ + if (priv->rtlchip == 0x8723b) { + /* + * For USB high speed set 512B packets + */ + val8 = rtl8xxxu_read8(priv, REG_RXDMA_PRO_8723B); + val8 &= ~(BIT(4) | BIT(5)); + val8 |= BIT(4); + val8 |= BIT(1) | BIT(2) | BIT(3); + rtl8xxxu_write8(priv, REG_RXDMA_PRO_8723B, val8); + + /* + * For USB high speed set 512B packets + */ + val8 = rtl8xxxu_read8(priv, REG_HT_SINGLE_AMPDU_8723B); + val8 |= BIT(7); + rtl8xxxu_write8(priv, REG_HT_SINGLE_AMPDU_8723B, val8); + + rtl8xxxu_write16(priv, REG_MAX_AGGR_NUM, 0x0c14); + rtl8xxxu_write8(priv, REG_AMPDU_MAX_TIME_8723B, 0x5e); + rtl8xxxu_write32(priv, REG_AGGLEN_LMT, 0xffffffff); + rtl8xxxu_write8(priv, REG_RX_PKT_LIMIT, 0x18); + rtl8xxxu_write8(priv, REG_PIFS, 0x00); + rtl8xxxu_write8(priv, REG_USTIME_TSF_8723B, 0x50); + rtl8xxxu_write8(priv, REG_USTIME_EDCA, 0x50); + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); + val8 |= BIT(5) | BIT(6); + rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); + } + /* * Enable CCK and OFDM block */ diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 5250388b0275..8209830848d6 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -405,7 +405,7 @@ /* Presumably only found on newer chips such as 8723bu */ #define REG_RX_DMA_CTRL_8723B 0x0286 -#define REG_RX_DMA_MODE_CTRL_8723B 0x0290 +#define REG_RXDMA_PRO_8723B 0x0290 #define REG_RF_BB_CMD_ADDR 0x02c0 #define REG_RF_BB_CMD_DATA 0x02c4 @@ -478,6 +478,7 @@ #define REG_ARFR1 0x0448 #define REG_ARFR2 0x044c #define REG_ARFR3 0x0450 +#define REG_AMPDU_MAX_TIME_8723B 0x0456 #define REG_AGGLEN_LMT 0x0458 #define REG_AMPDU_MIN_SPACE 0x045c #define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045d @@ -496,6 +497,7 @@ #define REG_PKT_VO_VI_LIFE_TIME 0x04c0 #define REG_PKT_BE_BK_LIFE_TIME 0x04c2 #define REG_STBC_SETTING 0x04c4 +#define REG_HT_SINGLE_AMPDU_8723B 0x04c7 #define REG_PROT_MODE_CTRL 0x04c8 #define REG_MAX_AGGR_NUM 0x04ca #define REG_RTS_MAX_AGGR_NUM 0x04cb @@ -560,6 +562,7 @@ #define BEACON_DMA_ATIME_INT_TIME 2 #define REG_ATIMWND 0x055a +#define REG_USTIME_TSF_8723B 0x055c #define REG_BCN_MAX_ERR 0x055d #define REG_RXTSF_OFFSET_CCK 0x055e #define REG_RXTSF_OFFSET_OFDM 0x055f -- cgit v1.2.3 From 1ea8e846c9eab861db1108302a391182c3fce528 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:04 -0500 Subject: rtl8xxxu: Call device specific _config_channel() Having a version for the newer chips without calling it doesn't do much good..... 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 631482555bee..960e1acd6ad7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7348,7 +7348,7 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) rtl8723a_set_tx_power(priv, channel, ht40); - rtl8723au_config_channel(hw); + priv->fops->config_channel(hw); } exit: -- cgit v1.2.3 From 5ac61789e864ca7fbfe4425afe53837f535363cc Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:05 -0500 Subject: rtl8xxxu: 8723bu lock phy after RF init Set PHY lock after running the RF init sequence on 8723bu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 960e1acd6ad7..f20568b14c3b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6093,6 +6093,13 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) case 0x8723b: rftable = rtl8723bu_radioa_1t_init_table; ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A); + /* + * PHY LCK + */ + rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0); + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01); + msleep(200); + rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0); break; case 0x8188c: if (priv->hi_pa) -- cgit v1.2.3 From 7e9567ff98a6b7312a6eb1fe0f9c0f81b42147eb Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:06 -0500 Subject: rtl8xxxu: Add REG_DWBCN1_CTRL_8723B define List yet another new register found on the 8723b. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index 8209830848d6..ec6f40f2dbc7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -397,6 +397,8 @@ #define REG_AUTO_LLT 0x0224 #define AUTO_LLT_INIT_LLT BIT(16) +#define REG_DWBCN1_CTRL_8723B 0x0228 + /* 0x0280 ~ 0x02FF RXDMA Configuration */ #define REG_RXDMA_AGG_PG_TH 0x0280 #define RXDMA_USB_AGG_ENABLE BIT(31) -- cgit v1.2.3 From 2f109c8e51d6302a18687c9381a62401d6b69f24 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:07 -0500 Subject: rtl8xxxu: Group chip quirks together Group chip quirks together instead of having them scattered all over in the init code. 
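Reduced to a runnable sketch, the resulting structure is a single if/else chain keyed on the chip ID, with the 8723a-only USB-interference and 80M-spur writes on one side and the generic TXDMA drop-data quirk on the other; the puts() calls stand in for the register writes in the hunk below:

#include <stdio.h>

/* every chip-specific oddity in one place, keyed on the chip ID */
static void apply_chip_quirks(unsigned int rtlchip)
{
	if (rtlchip == 0x8723a) {
		puts("fix USB interface interference (0xfe40..0xfe42)");
		puts("reduce 80M spur (AFE_XTAL_CTRL / AFE_PLL_CTRL writes)");
	} else {
		puts("set TXDMA_OFFSET_DROP_DATA_EN in TXDMA_OFFSET_CHK");
	}
}

int main(void)
{
	apply_chip_quirks(0x8723a);
	apply_chip_quirks(0x8723b);
	return 0;
}
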
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 28 +++++++++++++----------- 1 file changed, 15 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index f20568b14c3b..ad6371985402 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6028,18 +6028,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - /* Fix USB interface interference issue */ - if (priv->rtlchip == 0x8723a) { - rtl8xxxu_write8(priv, 0xfe40, 0xe0); - rtl8xxxu_write8(priv, 0xfe41, 0x8d); - rtl8xxxu_write8(priv, 0xfe42, 0x80); - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); - } else { - val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); - val32 |= TXDMA_OFFSET_DROP_DATA_EN; - rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); - } - /* Solve too many protocol error on USB bus */ /* Can't do this for 8188/8192 UMC A cut parts */ if (priv->rtlchip == 0x8723a || @@ -6127,11 +6115,25 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) if (ret) goto exit; - if (priv->rtlchip == 0x8723a) { /* Reduce 80M spur */ + /* + * Chip specific quirks + */ + if (priv->rtlchip == 0x8723a) { + /* Fix USB interface interference issue */ + rtl8xxxu_write8(priv, 0xfe40, 0xe0); + rtl8xxxu_write8(priv, 0xfe41, 0x8d); + rtl8xxxu_write8(priv, 0xfe42, 0x80); + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320); + + /* Reduce 80M spur */ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d); rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82); rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83); + } else { + val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK); + val32 |= TXDMA_OFFSET_DROP_DATA_EN; + rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); } if (!macpower){ -- cgit v1.2.3 From 3e88ca447a8a17e122ba58d79c725c63b3efcd70 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:08 -0500 Subject: rtl8xxxu: Setup RX aggregation This initializes RX DMA aggregation on 8723bu. We should do this for all parts eventually, and also init TX aggregation. 
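What rtl8723bu_init_aggregation() in the patch below computes for now is simply "aggregation off": clear the DMA aggregation enable in TRXDMA_CTRL, and clear the USB aggregation enable plus the page/timeout thresholds in RXDMA_AGG_PG_TH. A stand-alone sketch of those two computations; the register start values in main() are arbitrary, while the bit positions are the ones defined in the patch:

#include <stdint.h>
#include <stdio.h>

#define TRXDMA_CTRL_RXDMA_AGG_EN	(1u << 2)
#define RXDMA_USB_AGG_ENABLE		(1u << 31)

/* REG_TRXDMA_CTRL: drop the DMA aggregation enable bit */
static uint8_t agg_ctrl_off(uint8_t agg_ctrl)
{
	return agg_ctrl & (uint8_t)~TRXDMA_CTRL_RXDMA_AGG_EN;
}

/* REG_RXDMA_AGG_PG_TH: drop USB aggregation plus the page/timeout thresholds */
static uint32_t agg_threshold_off(uint32_t agg_rx)
{
	agg_rx &= ~RXDMA_USB_AGG_ENABLE;
	agg_rx &= ~0xff0fu;
	return agg_rx;
}

int main(void)
{
	printf("TRXDMA_CTRL: 0x%02x -> 0x%02x\n", 0xff, agg_ctrl_off(0xff));
	printf("AGG_PG_TH:   0x%08x -> 0x%08x\n", 0xffffffffu,
	       agg_threshold_off(0xffffffffu));
	return 0;
}
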
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 23 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 1 + 3 files changed, 25 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ad6371985402..04dad19402e5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5962,6 +5962,25 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); } +static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv) +{ + u32 agg_rx; + u8 agg_ctrl; + + /* + * For now simply disable RX aggregation + */ + agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL); + agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN; + + agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH); + agg_rx &= ~RXDMA_USB_AGG_ENABLE; + agg_rx &= ~0xff0f; + + rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl); + rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); +} + static int rtl8xxxu_init_device(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; @@ -6320,6 +6339,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); } + if (priv->fops->init_aggregation) + priv->fops->init_aggregation(priv); + /* * Enable CCK and OFDM block */ @@ -7998,6 +8020,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .config_channel = rtl8723bu_config_channel, .init_bt = rtl8723bu_init_bt, .parse_rx_desc = rtl8723bu_parse_rx_desc, + .init_aggregation = rtl8723bu_init_aggregation, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 2250e1b17d60..14b7a35bc05b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1133,6 +1133,7 @@ struct rtl8xxxu_fileops { void (*init_bt) (struct rtl8xxxu_priv *priv); int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb, struct ieee80211_rx_status *rx_status); + void (*init_aggregation) (struct rtl8xxxu_priv *priv); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index ec6f40f2dbc7..d6c7ad30a87b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -318,6 +318,7 @@ #define PBP_PAGE_SIZE_1024 0x4 #define REG_TRXDMA_CTRL 0x010c +#define TRXDMA_CTRL_RXDMA_AGG_EN BIT(2) #define TRXDMA_CTRL_VOQ_SHIFT 4 #define TRXDMA_CTRL_VIQ_SHIFT 6 #define TRXDMA_CTRL_BEQ_SHIFT 8 -- cgit v1.2.3 From f2a4163a22c66d178084ff722c490f5ecf91a088 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:09 -0500 Subject: rtl8xxxu: Add missing blank space in front of bracket Keep the automated tools happy Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 04dad19402e5..1fa0dbfdef84 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ 
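The magic number written to the NHM timer in the diff below works out as follows (plain arithmetic, unit taken from the patch's own comment):

    /*
     * Decoding the NHM timer value:
     *   0x2710 = 10000 ticks, one NHM tick = 4 us
     *   10000 * 4 us = 40000 us = 40 ms measurement window
     */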
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6155,7 +6155,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32); } - if (!macpower){ + if (!macpower) { if (priv->ep_tx_normal_queue) val8 = TX_PAGE_NUM_NORM_PQ; else -- cgit v1.2.3 From 9c79bf95d08a920b1c1219a2a9d5fce2f6172bbe Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:10 -0500 Subject: rtl8xxxu: Implement init_statistics for 8723bu Vendor driver implements this for 8723b and 8821 series Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 27 ++++++++++++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 7 ++++++ 3 files changed, 35 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 1fa0dbfdef84..6b6fa1f24696 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5981,6 +5981,29 @@ static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx); } +static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv) +{ + u32 val32; + + /* Time duration for NHM unit: 4us, 0x2710=40ms */ + rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710); + rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff); + rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52); + rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff); + /* TH8 */ + val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK); + val32 |= 0xff; + rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32); + /* Enable CCK */ + val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B); + val32 |= BIT(8) | BIT(9) | BIT(10); + rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32); + /* Max power amongst all RX antennas */ + val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC); + val32 |= BIT(7); + rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); +} + static int rtl8xxxu_init_device(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; @@ -6371,6 +6394,9 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) rtl8xxxu_write16(priv, REG_FAST_EDCA_CTRL, 0); + if (priv->fops->init_statistics) + priv->fops->init_statistics(priv); + rtl8723a_phy_lc_calibrate(priv); priv->fops->phy_iq_calibrate(priv); @@ -8021,6 +8047,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_bt = rtl8723bu_init_bt, .parse_rx_desc = rtl8723bu_parse_rx_desc, .init_aggregation = rtl8723bu_init_aggregation, + .init_statistics = rtl8723bu_init_statistics, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 14b7a35bc05b..b75678b9b60d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1134,6 +1134,7 @@ struct rtl8xxxu_fileops { int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb, struct ieee80211_rx_status *rx_status); void (*init_aggregation) (struct rtl8xxxu_priv *priv); + void (*init_statistics) (struct rtl8xxxu_priv *priv); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index d6c7ad30a87b..d3f6fa376625 
100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -833,6 +833,11 @@ #define REG_FPGA0_ANALOG3 0x0888 #define REG_FPGA0_ANALOG4 0x088c +#define REG_NHM_TH9_TH10_8723B 0x0890 +#define REG_NHM_TIMER_8723B 0x0894 +#define REG_NHM_TH3_TO_TH0_8723B 0x0898 +#define REG_NHM_TH7_TO_TH4_8723B 0x089c + #define REG_FPGA0_XA_LSSI_READBACK 0x08a0 /* Tranceiver LSSI Readback */ #define REG_FPGA0_XB_LSSI_READBACK 0x08a4 #define REG_HSPI_XA_READBACK 0x08b8 /* Transceiver A HSPI read */ @@ -869,6 +874,8 @@ #define REG_OFDM0_TR_MUX_PAR 0x0c08 +#define REG_OFDM0_FA_RSTC 0x0c0c + #define REG_OFDM0_XA_RX_IQ_IMBALANCE 0x0c14 #define REG_OFDM0_XB_RX_IQ_IMBALANCE 0x0c1c -- cgit v1.2.3 From 541bca7f5d8ffbd4dba8b432f3a5162cb7ba41a6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:11 -0500 Subject: rtl8xxxu: RF_T_METER is different on the newer chips Provide RF_T_METER register location for nextgen chips. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index d3f6fa376625..a055362471e5 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -1104,9 +1104,10 @@ /* * NextGen regs: 8723BU */ -#define RF6052_REG_UNKNOWN_43 0x43 -#define RF6052_REG_UNKNOWN_55 0x55 -#define RF6052_REG_S0S1 0xb0 -#define RF6052_REG_UNKNOWN_DF 0xdf -#define RF6052_REG_UNKNOWN_ED 0xed -#define RF6052_REG_WE_LUT 0xef +#define RF6052_REG_T_METER_8723B 0x42 +#define RF6052_REG_UNKNOWN_43 0x43 +#define RF6052_REG_UNKNOWN_55 0x55 +#define RF6052_REG_S0S1 0xb0 +#define RF6052_REG_UNKNOWN_DF 0xdf +#define RF6052_REG_UNKNOWN_ED 0xed +#define RF6052_REG_WE_LUT 0xef -- cgit v1.2.3 From fc1c89b3407fe0b3ebc8fa651332a62ed773872d Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:12 -0500 Subject: rtl8xxxu: Set WLAN_ACT_CONTROL per vendor driver setting The initial code set the wrong setting in WLAN_ACT_CONTROL for the 8723bu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 6b6fa1f24696..52c75675d3b2 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5894,7 +5894,7 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) /* * WLAN action by PTA */ - rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x0c); + rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04); /* * BT select S0/S1 controlled by WiFi -- cgit v1.2.3 From 499cfc02a08a27ba29d22938ddbe5140806316c5 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:13 -0500 Subject: rtl8xxxu: 8723bu: REG_BT_COEX_TABLE4 is only 8 bits The BT_COEX_TABLE register list contains 3 32 bit registers and one 8 bit register. Hence, use rtl8xxxu_write8() when writing the 8 bit register. 
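Condensed view of the four coex-table writes as they stand after this patch (values as in the diff below; a later patch in this series retunes REG_BT_COEX_TABLE2):

    rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);    /* 32 bit */
    rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a);    /* 32 bit */
    rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);    /* 32 bit */
    rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);           /*  8 bit */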
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 52c75675d3b2..47d85d57a175 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5954,7 +5954,7 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a); rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); - rtl8xxxu_write32(priv, REG_BT_COEX_TABLE4, 0x00000003); + rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; -- cgit v1.2.3 From 120e627f658b3fe2be2f3699527dd687168b3693 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:14 -0500 Subject: rtl8xxxu: Use name for REG_RFE_BUFFER rather than hard coded value Register 0x0944 is REG_RFE_BUFFER. Use the name rather than hard coded value when accessing it. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 47d85d57a175..3764f3e1b3dd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2946,9 +2946,9 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) val32 &= ~BIT(23); rtl8xxxu_write32(priv, REG_LEDCFG0, val32); - val32 = rtl8xxxu_read32(priv, 0x0944); + val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 |= (BIT(0) | BIT(1)); - rtl8xxxu_write32(priv, 0x0944, val32); + rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); val32 = rtl8xxxu_read32(priv, 0x0930); val32 &= 0xffffff00; @@ -5912,9 +5912,9 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) */ rtl8xxxu_write8(priv, 0x0974, 0xff); - val32 = rtl8xxxu_read32(priv, 0x0944); + val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER); val32 |= (BIT(0) | BIT(1)); - rtl8xxxu_write32(priv, 0x0944, val32); + rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77); -- cgit v1.2.3 From 59b743979c178b780bc57f179493fdbf3ad4f0b7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:15 -0500 Subject: rtl8xxxu: Use REG_RFE_CTRL_ANTA_SRC rather than hard coded value Another case where we should use the register name rather than the hard coded value when accessing it. 
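For reference, the register names in this and the previous patch map back to the hard-coded addresses they replace; presumably they are defined along these lines in rtl8xxxu_regs.h (the defines themselves are not part of these diffs):

    #define REG_RFE_CTRL_ANTA_SRC   0x0930
    #define REG_RFE_BUFFER          0x0944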
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3764f3e1b3dd..5b74ba114d53 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2950,10 +2950,10 @@ static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv) val32 |= (BIT(0) | BIT(1)); rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32); - val32 = rtl8xxxu_read32(priv, 0x0930); + val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC); val32 &= 0xffffff00; val32 |= 0x77; - rtl8xxxu_write32(priv, 0x0930, val32); + rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32); val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; -- cgit v1.2.3 From a3a5dac6b1bf69b1d90f3293abfb4392ec2fc68f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:16 -0500 Subject: rtl8xxxu: Setup coex table correctly (hopefully) Use the same values as the vendor driver when setting up the BTCOEX table for 8723bu. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5b74ba114d53..3e99a17c7962 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5842,6 +5842,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e); } +#ifdef NEED_PS_TDMA static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv, u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5) { @@ -5856,6 +5857,7 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv, h2c.b_type_dma.data5 = arg5; rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma)); } +#endif static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) { @@ -5944,18 +5946,20 @@ static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) /* * Software control, antenna at WiFi side */ +#ifdef NEED_PS_TDMA rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00); +#endif + + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555); + rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); + rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03); memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.bt_info.cmd = H2C_8723B_BT_INFO; h2c.bt_info.data = BIT(0); rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info)); - rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555); - rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x5a5a5a5a); - rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff); - rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03); - memset(&h2c, 0, sizeof(struct h2c_cmd)); h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT; h2c.ignore_wlan.data = 0; -- cgit v1.2.3 From db08de9443be5d4eef724621d3d51cab4daf25d3 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:17 -0500 Subject: rtl8xxxu: Do not use hard-wired RF enable settings for 8723bu These settings simply block the 8723bu, for now leave an empty function. With this change we can finally communicate with aliens using the 8723bu! 
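Sketch of the resulting split (details in the diff below): RF enable becomes a per-chip fileop, with the 8723bu entry left deliberately empty until a working sequence is found:

    static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
    {
            /* Intentionally empty - the 8723a sequence blocks this chip */
    }

    /* rtl8xxxu_start() then calls priv->fops->enable_rf(priv) instead of
     * calling rtl8723a_enable_rf(priv) directly. */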
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 10 +++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3e99a17c7962..5b6b9c5efc86 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1596,6 +1596,10 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } +static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) +{ +} + static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv) { u8 sps0; @@ -7665,7 +7669,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw) init_usb_anchor(&priv->tx_anchor); init_usb_anchor(&priv->int_anchor); - rtl8723a_enable_rf(priv); + priv->fops->enable_rf(priv); if (priv->usb_interrupts) { ret = rtl8xxxu_submit_int_urb(hw); if (ret) @@ -8031,6 +8035,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8723au_parse_rx_desc, + .enable_rf = rtl8723a_enable_rf, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8052,6 +8057,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .parse_rx_desc = rtl8723bu_parse_rx_desc, .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, + .enable_rf = rtl8723b_enable_rf, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -8072,6 +8078,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8723au_parse_rx_desc, + .enable_rf = rtl8723a_enable_rf, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8091,6 +8098,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8723bu_parse_rx_desc, + .enable_rf = rtl8723b_enable_rf, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index b75678b9b60d..ac8f47f66d5f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1135,6 +1135,7 @@ struct rtl8xxxu_fileops { struct ieee80211_rx_status *rx_status); void (*init_aggregation) (struct rtl8xxxu_priv *priv); void (*init_statistics) (struct rtl8xxxu_priv *priv); + void (*enable_rf) (struct rtl8xxxu_priv *priv); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; -- cgit v1.2.3 From 4a0d7db53124dc81b03926bcec864a0575d94f8e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:18 -0500 Subject: rtl8xxxu: Correct struct rtl8723bu_efuse to list power bases correctly Correct TX power definitions in rtl8723bu_efuse Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 10 ---------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 23 ++++++++++++++++++----- 2 files changed, 18 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5b6b9c5efc86..40447e4eb1d9 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2338,16 +2338,6 @@ static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) ether_addr_copy(priv->mac_addr, efuse->mac_addr); - memcpy(priv->cck_tx_power_index_A, efuse->cck_tx_power_index_A, - sizeof(priv->cck_tx_power_index_A)); - memcpy(priv->cck_tx_power_index_B, efuse->cck_tx_power_index_B, - sizeof(priv->cck_tx_power_index_B)); - - memcpy(priv->ht40_1s_tx_power_index_A, efuse->ht40_1s_tx_power_index_A, - sizeof(priv->ht40_1s_tx_power_index_A)); - memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, - sizeof(priv->ht40_1s_tx_power_index_B)); - priv->has_xtalk = 1; priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index ac8f47f66d5f..6d565812a4b1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -53,6 +53,7 @@ #define RTL8723A_CHANNEL_GROUPS 3 #define RTL8723A_MAX_RF_PATHS 2 +#define RTL8723B_MAX_RF_PATHS 4 #define RF6052_MAX_TX_PWR 0x3f #define EFUSE_MAP_LEN 512 @@ -631,14 +632,26 @@ struct rtl8192cu_efuse { u8 customer_id; }; +struct rtl8723bu_efuse_tx_power { + u8 cck_base[6]; + u8 ht40_base[5]; + struct rtl8723au_idx ht20_ofdm_1s_diff; + struct rtl8723au_idx ht40_ht20_2s_diff; + struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */ + struct rtl8723au_idx ht40_ht20_3s_diff; + struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */ + struct rtl8723au_idx ht40_ht20_4s_diff; + struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */ + u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */ +}; + struct rtl8723bu_efuse { __le16 rtl_id; u8 res0[0x0e]; - u8 cck_tx_power_index_A[3]; /* 0x10 */ - u8 cck_tx_power_index_B[3]; - u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ - u8 ht40_1s_tx_power_index_B[3]; - u8 res1[0x9c]; + struct rtl8723bu_efuse_tx_power tx_power_index_A; /* 0x10 */ + struct rtl8723bu_efuse_tx_power tx_power_index_B; /* 0x3a */ + struct rtl8723bu_efuse_tx_power tx_power_index_C; /* 0x64 */ + struct rtl8723bu_efuse_tx_power tx_power_index_D; /* 0x8e */ u8 channel_plan; /* 0xb8 */ u8 xtal_k; u8 thermal_meter; -- cgit v1.2.3 From e796dab4b987503913418260bce782d230df60db Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:19 -0500 Subject: rtl8xxxu: Introduce set_tx_power() fileop and a new 8723b dummy derivative The 8723b series is significantly different from the older generation in this sense. So far the 8723b version doesn't do anything useful. 
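The new rtl8723b_channel_to_group() in the diff below gives the following 2.4 GHz channel-to-group mapping, listed here for convenience:

    /*
     *   channels  1-2  -> group 0
     *   channels  3-5  -> group 1
     *   channels  6-8  -> group 2
     *   channels  9-11 -> group 3
     *   channels 12-14 -> group 4
     */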
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 34 ++++++++++++++++++++++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 2 ++ 2 files changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 40447e4eb1d9..de1e6541b1a0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1680,6 +1680,24 @@ static int rtl8723a_channel_to_group(int channel) return group; } +static int rtl8723b_channel_to_group(int channel) +{ + int group; + + if (channel < 3) + group = 0; + else if (channel < 6) + group = 1; + else if (channel < 9) + group = 2; + else if (channel < 12) + group = 3; + else + group = 4; + + return group; +} + static void rtl8723au_config_channel(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; @@ -2041,6 +2059,14 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) } } +static void +rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) +{ + int group; + + group = rtl8723b_channel_to_group(channel); +} + static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, enum nl80211_iftype linktype) { @@ -6378,7 +6404,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * Start out with default power levels for channel 6, 20MHz */ - rtl8723a_set_tx_power(priv, 1, false); + priv->fops->set_tx_power(priv, 1, false); /* Let the 8051 take control of antenna setting */ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2); @@ -7401,7 +7427,7 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) channel = hw->conf.chandef.chan->hw_value; - rtl8723a_set_tx_power(priv, channel, ht40); + priv->fops->set_tx_power(priv, channel, ht40); priv->fops->config_channel(hw); } @@ -8026,6 +8052,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8723au_parse_rx_desc, .enable_rf = rtl8723a_enable_rf, + .set_tx_power = rtl8723a_set_tx_power, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8048,6 +8075,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, .enable_rf = rtl8723b_enable_rf, + .set_tx_power = rtl8723b_set_tx_power, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -8069,6 +8097,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8723au_parse_rx_desc, .enable_rf = rtl8723a_enable_rf, + .set_tx_power = rtl8723a_set_tx_power, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8089,6 +8118,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8723bu_parse_rx_desc, .enable_rf = rtl8723b_enable_rf, + .set_tx_power = rtl8723b_set_tx_power, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 6d565812a4b1..c7b4c92ea732 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1149,6 +1149,8 @@ struct rtl8xxxu_fileops { void (*init_aggregation) (struct rtl8xxxu_priv *priv); void 
(*init_statistics) (struct rtl8xxxu_priv *priv); void (*enable_rf) (struct rtl8xxxu_priv *priv); + void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel, + bool ht40); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; -- cgit v1.2.3 From 3e84f9386162878deb146a7e4f05710693f1a9c7 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:20 -0500 Subject: rtl8xxxu: Use size of source pointer when copying efuse data Some newer chips have more channel groups in their efuse parameter tables, so use the size of the source, rather than the destination when copying them out. This avoids copying garbage when increasing the common array sizes. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 34 ++++++++++++------------ 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index de1e6541b1a0..4c37bd4b73b1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2318,31 +2318,31 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) memcpy(priv->cck_tx_power_index_A, efuse->cck_tx_power_index_A, - sizeof(priv->cck_tx_power_index_A)); + sizeof(efuse->cck_tx_power_index_A)); memcpy(priv->cck_tx_power_index_B, efuse->cck_tx_power_index_B, - sizeof(priv->cck_tx_power_index_B)); + sizeof(efuse->cck_tx_power_index_B)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->ht40_1s_tx_power_index_A, - sizeof(priv->ht40_1s_tx_power_index_A)); + sizeof(efuse->ht40_1s_tx_power_index_A)); memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, - sizeof(priv->ht40_1s_tx_power_index_B)); + sizeof(efuse->ht40_1s_tx_power_index_B)); memcpy(priv->ht20_tx_power_index_diff, efuse->ht20_tx_power_index_diff, - sizeof(priv->ht20_tx_power_index_diff)); + sizeof(efuse->ht20_tx_power_index_diff)); memcpy(priv->ofdm_tx_power_index_diff, efuse->ofdm_tx_power_index_diff, - sizeof(priv->ofdm_tx_power_index_diff)); + sizeof(efuse->ofdm_tx_power_index_diff)); memcpy(priv->ht40_max_power_offset, efuse->ht40_max_power_offset, - sizeof(priv->ht40_max_power_offset)); + sizeof(efuse->ht40_max_power_offset)); memcpy(priv->ht20_max_power_offset, efuse->ht20_max_power_offset, - sizeof(priv->ht20_max_power_offset)); + sizeof(efuse->ht20_max_power_offset)); if (priv->efuse_wifi.efuse8723.version >= 0x01) { priv->has_xtalk = 1; @@ -2403,34 +2403,34 @@ static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv) memcpy(priv->cck_tx_power_index_A, efuse->cck_tx_power_index_A, - sizeof(priv->cck_tx_power_index_A)); + sizeof(efuse->cck_tx_power_index_A)); memcpy(priv->cck_tx_power_index_B, efuse->cck_tx_power_index_B, - sizeof(priv->cck_tx_power_index_B)); + sizeof(efuse->cck_tx_power_index_B)); memcpy(priv->ht40_1s_tx_power_index_A, efuse->ht40_1s_tx_power_index_A, - sizeof(priv->ht40_1s_tx_power_index_A)); + sizeof(efuse->ht40_1s_tx_power_index_A)); memcpy(priv->ht40_1s_tx_power_index_B, efuse->ht40_1s_tx_power_index_B, - sizeof(priv->ht40_1s_tx_power_index_B)); + sizeof(efuse->ht40_1s_tx_power_index_B)); memcpy(priv->ht40_2s_tx_power_index_diff, efuse->ht40_2s_tx_power_index_diff, - sizeof(priv->ht40_2s_tx_power_index_diff)); + sizeof(efuse->ht40_2s_tx_power_index_diff)); memcpy(priv->ht20_tx_power_index_diff, efuse->ht20_tx_power_index_diff, - sizeof(priv->ht20_tx_power_index_diff)); + sizeof(efuse->ht20_tx_power_index_diff)); 
memcpy(priv->ofdm_tx_power_index_diff, efuse->ofdm_tx_power_index_diff, - sizeof(priv->ofdm_tx_power_index_diff)); + sizeof(efuse->ofdm_tx_power_index_diff)); memcpy(priv->ht40_max_power_offset, efuse->ht40_max_power_offset, - sizeof(priv->ht40_max_power_offset)); + sizeof(efuse->ht40_max_power_offset)); memcpy(priv->ht20_max_power_offset, efuse->ht20_max_power_offset, - sizeof(priv->ht20_max_power_offset)); + sizeof(efuse->ht20_max_power_offset)); dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name); -- cgit v1.2.3 From 21db9973303caebcbe2fde6dcc668c1aac2ed55f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:21 -0500 Subject: rtl8xxxu: Bump TX power arrays to handle larger channel groups Newer generation chips have more channels groups. In order to carry the larger arrays in common structures, bump the array sizes to match. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 25 +++++++++++++++--------- 1 file changed, 16 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index c7b4c92ea732..92768f54ce55 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -53,7 +53,9 @@ #define RTL8723A_CHANNEL_GROUPS 3 #define RTL8723A_MAX_RF_PATHS 2 +#define RTL8723B_CHANNEL_GROUPS 6 #define RTL8723B_MAX_RF_PATHS 4 +#define RTL8XXXU_MAX_CHANNEL_GROUPS 6 #define RF6052_MAX_TX_PWR 0x3f #define EFUSE_MAP_LEN 512 @@ -1046,19 +1048,24 @@ struct rtl8xxxu_priv { u8 mac_addr[ETH_ALEN]; char chip_name[8]; char chip_vendor[8]; - u8 cck_tx_power_index_A[3]; /* 0x10 */ - u8 cck_tx_power_index_B[3]; - u8 ht40_1s_tx_power_index_A[3]; /* 0x16 */ - u8 ht40_1s_tx_power_index_B[3]; + u8 cck_tx_power_index_A[RTL8XXXU_MAX_CHANNEL_GROUPS]; + u8 cck_tx_power_index_B[RTL8XXXU_MAX_CHANNEL_GROUPS]; + u8 ht40_1s_tx_power_index_A[RTL8XXXU_MAX_CHANNEL_GROUPS]; + u8 ht40_1s_tx_power_index_B[RTL8XXXU_MAX_CHANNEL_GROUPS]; /* * The following entries are half-bytes split as: * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed */ - struct rtl8723au_idx ht40_2s_tx_power_index_diff[3]; - struct rtl8723au_idx ht20_tx_power_index_diff[3]; - struct rtl8723au_idx ofdm_tx_power_index_diff[3]; - struct rtl8723au_idx ht40_max_power_offset[3]; - struct rtl8723au_idx ht20_max_power_offset[3]; + struct rtl8723au_idx ht40_2s_tx_power_index_diff[ + RTL8XXXU_MAX_CHANNEL_GROUPS]; + struct rtl8723au_idx ht20_tx_power_index_diff[ + RTL8XXXU_MAX_CHANNEL_GROUPS]; + struct rtl8723au_idx ofdm_tx_power_index_diff[ + RTL8XXXU_MAX_CHANNEL_GROUPS]; + struct rtl8723au_idx ht40_max_power_offset[ + RTL8XXXU_MAX_CHANNEL_GROUPS]; + struct rtl8723au_idx ht20_max_power_offset[ + RTL8XXXU_MAX_CHANNEL_GROUPS]; u32 chip_cut:4; u32 rom_rev:4; u32 is_multi_func:1; -- cgit v1.2.3 From 3be269990814e7f052e134cfc1d538b7010c0600 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:22 -0500 Subject: rtl8xxxu: Parse efuse power indices for 8723bu This should (hopefully) parse the power indices correctly for the 8723bu. 
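Rough picture of what gets pulled out of the efuse here (field names from the diff below; how the values are consumed follows in the TX power patches later in the series):

    /*
     * Per RF path (A/B):
     *   cck_base[]  -> priv->cck_tx_power_index_X      (CCK base per group)
     *   ht40_base[] -> priv->ht40_1s_tx_power_index_X  (HT40 1S base per group)
     *   ht20_ofdm_1s_diff and pwr_diff[0..2] -> signed 4-bit per-TX-count
     *     OFDM/HT20/HT40 offsets, applied on top of the HT40 1S base
     *     (TX count 1 comes from the 1S entry, counts 2-4 from pwr_diff[]).
     */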
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 43 ++++++++++++++++++++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 43 +++++++++++++++--------- 2 files changed, 71 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4c37bd4b73b1..0723008b4437 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2358,12 +2358,55 @@ static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv) static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu; + int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; ether_addr_copy(priv->mac_addr, efuse->mac_addr); + memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base, + sizeof(efuse->tx_power_index_A.cck_base)); + memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base, + sizeof(efuse->tx_power_index_B.cck_base)); + + memcpy(priv->ht40_1s_tx_power_index_A, + efuse->tx_power_index_A.ht40_base, + sizeof(efuse->tx_power_index_A.ht40_base)); + memcpy(priv->ht40_1s_tx_power_index_B, + efuse->tx_power_index_B.ht40_base, + sizeof(efuse->tx_power_index_B.ht40_base)); + + priv->ofdm_tx_power_diff[0].a = + efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; + priv->ofdm_tx_power_diff[0].b = + efuse->tx_power_index_B.ht20_ofdm_1s_diff.a; + + priv->ht20_tx_power_diff[0].a = + efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; + priv->ht20_tx_power_diff[0].b = + efuse->tx_power_index_B.ht20_ofdm_1s_diff.b; + + priv->ht40_tx_power_diff[0].a = 0; + priv->ht40_tx_power_diff[0].b = 0; + + for (i = 1; i < RTL8723B_TX_COUNT; i++) { + priv->ofdm_tx_power_diff[i].a = + efuse->tx_power_index_A.pwr_diff[i - 1].ofdm; + priv->ofdm_tx_power_diff[i].b = + efuse->tx_power_index_B.pwr_diff[i - 1].ofdm; + + priv->ht20_tx_power_diff[i].a = + efuse->tx_power_index_A.pwr_diff[i - 1].ht20; + priv->ht20_tx_power_diff[i].b = + efuse->tx_power_index_B.pwr_diff[i - 1].ht20; + + priv->ht40_tx_power_diff[i].a = + efuse->tx_power_index_A.pwr_diff[i - 1].ht40; + priv->ht40_tx_power_diff[i].b = + efuse->tx_power_index_B.pwr_diff[i - 1].ht40; + } + priv->has_xtalk = 1; priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 92768f54ce55..fe2356179915 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -54,6 +54,7 @@ #define RTL8723A_CHANNEL_GROUPS 3 #define RTL8723A_MAX_RF_PATHS 2 #define RTL8723B_CHANNEL_GROUPS 6 +#define RTL8723B_TX_COUNT 4 #define RTL8723B_MAX_RF_PATHS 4 #define RTL8XXXU_MAX_CHANNEL_GROUPS 6 #define RF6052_MAX_TX_PWR 0x3f @@ -634,16 +635,25 @@ struct rtl8192cu_efuse { u8 customer_id; }; +struct rtl8723bu_pwr_idx { +#ifdef __LITTLE_ENDIAN + int ht20:4; + int ht40:4; + int ofdm:4; + int cck:4; +#else + int cck:4; + int ofdm:4; + int ht40:4; + int ht20:4; +#endif +} __attribute__((packed)); + struct rtl8723bu_efuse_tx_power { u8 cck_base[6]; u8 ht40_base[5]; struct rtl8723au_idx ht20_ofdm_1s_diff; - struct rtl8723au_idx ht40_ht20_2s_diff; - struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */ - struct rtl8723au_idx ht40_ht20_3s_diff; - struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */ - struct rtl8723au_idx ht40_ht20_4s_diff; - struct rtl8723au_idx 
ofdm_cck_4s_diff; /* not used */ + struct rtl8723bu_pwr_idx pwr_diff[3]; u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */ }; @@ -1057,15 +1067,18 @@ struct rtl8xxxu_priv { * bits 0-3: path A, bits 4-7: path B, all values 4 bits signed */ struct rtl8723au_idx ht40_2s_tx_power_index_diff[ - RTL8XXXU_MAX_CHANNEL_GROUPS]; - struct rtl8723au_idx ht20_tx_power_index_diff[ - RTL8XXXU_MAX_CHANNEL_GROUPS]; - struct rtl8723au_idx ofdm_tx_power_index_diff[ - RTL8XXXU_MAX_CHANNEL_GROUPS]; - struct rtl8723au_idx ht40_max_power_offset[ - RTL8XXXU_MAX_CHANNEL_GROUPS]; - struct rtl8723au_idx ht20_max_power_offset[ - RTL8XXXU_MAX_CHANNEL_GROUPS]; + RTL8723A_CHANNEL_GROUPS]; + struct rtl8723au_idx ht20_tx_power_index_diff[RTL8723A_CHANNEL_GROUPS]; + struct rtl8723au_idx ofdm_tx_power_index_diff[RTL8723A_CHANNEL_GROUPS]; + struct rtl8723au_idx ht40_max_power_offset[RTL8723A_CHANNEL_GROUPS]; + struct rtl8723au_idx ht20_max_power_offset[RTL8723A_CHANNEL_GROUPS]; + /* + * Newer generation chips only keep power diffs per TX count, + * not per channel group. + */ + struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT]; + struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT]; + struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT]; u32 chip_cut:4; u32 rom_rev:4; u32 is_multi_func:1; -- cgit v1.2.3 From 54bed43f3a67351b1ad75ddcad82587d9e5471e0 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:23 -0500 Subject: rtl8xxxu: Set 8723bu TX power for CCK and OFDM rates This implements support for setting TX power for CCK and OFDM rates on 8723bu. MCS rates is still pending. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 0723008b4437..e8006055c7c4 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2062,9 +2062,30 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) static void rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { - int group; + u32 val32, ofdm; + u8 cck, ofdmbase; + int group, tx_idx; + tx_idx = 0; group = rtl8723b_channel_to_group(channel); + + cck = priv->cck_tx_power_index_B[group]; + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32); + val32 &= 0xffff00ff; + val32 |= (cck << 8); + rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32); + + val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11); + val32 &= 0xff; + val32 |= ((cck << 8) | (cck << 16) | (cck << 24)); + rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32); + + ofdmbase = priv->ht40_1s_tx_power_index_B[group]; + ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b; + ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm); + rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm); } static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, -- cgit v1.2.3 From 1d3cc44dddf8501934172ea141397647f01bb0c6 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:24 -0500 Subject: rtl8xxxu: Set 8723bu MCS TX power This adds the missing support for setting MCS TX power rates on 8723bu. 
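Worked example with made-up efuse numbers, purely to illustrate how the per-rate AGC words are assembled by rtl8723b_set_tx_power() (registers as in this and the previous patch):

    /*
     *   ht40_1s_tx_power_index_B[group] = 0x2d  (hypothetical)
     *   ofdm_tx_power_diff[0].b         = 0x02  (hypothetical)
     *   ht20_tx_power_diff[0].b         = 0x01  (hypothetical, 20 MHz case)
     *
     *   ofdmbase = 0x2d + 0x02 = 0x2f -> 0x2f2f2f2f written to
     *     REG_TX_AGC_A_RATE18_06 and REG_TX_AGC_A_RATE54_24
     *   mcsbase  = 0x2d + 0x01 = 0x2e -> 0x2e2e2e2e written to
     *     REG_TX_AGC_A_MCS03_MCS00 and REG_TX_AGC_A_MCS07_MCS04
     */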
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e8006055c7c4..56813390c0ea 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2062,8 +2062,8 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) static void rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) { - u32 val32, ofdm; - u8 cck, ofdmbase; + u32 val32, ofdm, mcs; + u8 cck, ofdmbase, mcsbase; int group, tx_idx; tx_idx = 0; @@ -2086,6 +2086,16 @@ rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40) rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm); rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm); + + mcsbase = priv->ht40_1s_tx_power_index_B[group]; + if (ht40) + mcsbase += priv->ht40_tx_power_diff[tx_idx++].b; + else + mcsbase += priv->ht20_tx_power_diff[tx_idx++].b; + mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24; + + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs); + rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs); } static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv, -- cgit v1.2.3 From 72143b9e947c593555fa2a869a4bc08b9745b105 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:25 -0500 Subject: rtl8xxxu: Set the correct thermal meter register for 8723bu Older chips use RF register 0x24 to set the thermal meter. Newer chips use register 0x42. This change makes sure to set the correct thermal meter register depending on the chip. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 56813390c0ea..241d68d161aa 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6502,7 +6502,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) /* * This should enable thermal meter */ - rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60); + if (priv->fops->has_s0s1) + rtl8xxxu_write_rfreg(priv, + RF_A, RF6052_REG_T_METER_8723B, 0x37cf8); + else + rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60); /* Init BT hw config. */ if (priv->fops->init_bt) -- cgit v1.2.3 From 80491a1f3c878be44efa9d25fda1bb7b3388ca79 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:26 -0500 Subject: rtl8xxxu: Add definition for 8723bu tx descriptor Newer generation chips use a 40 byte TX descriptor, compared to the 32 byte descriptor used on older chips. This adds the definition for the 40 byte descriptor. 
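Quick size check of the new layout (fields as in the diff below):

    /*
     *   pkt_size(2) + pkt_offset(1) + txdw0(1)  =  4 bytes
     *   txdw1..txdw6, 6 x __le32                = 24 bytes
     *   csum(2) + txdw7(2)                      =  4 bytes
     *   txdw8 + txdw9, 2 x __le32               =  8 bytes
     *                                     total = 40 bytes
     * i.e. the existing 32 byte descriptor plus two trailing dwords.
     */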
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index fe2356179915..f06b88b97bee 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -346,6 +346,22 @@ struct rtl8xxxu_tx_desc { __le16 txdw7; }; +struct rtl8723bu_tx_desc { + __le16 pkt_size; + u8 pkt_offset; + u8 txdw0; + __le32 txdw1; + __le32 txdw2; + __le32 txdw3; + __le32 txdw4; + __le32 txdw5; + __le32 txdw6; + __le16 csum; + __le16 txdw7; + __le32 txdw8; + __le32 txdw9; +}; + /* CCK Rates, TxHT = 0 */ #define DESC_RATE_1M 0x00 #define DESC_RATE_2M 0x01 -- cgit v1.2.3 From 179e1742569620aed495f5d106d019b2dd6057d1 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:27 -0500 Subject: rtl8xxxu: Handle 40 byte TX descriptors for rtl8723bu Note the descriptor checksum is still only calculated over the initial 32 bytes of the descriptor, ignoring the last 8 bytes of the descriptor. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 35 ++++++++++++++++-------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 3 +- 2 files changed, 25 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 241d68d161aa..19bc5d7556bc 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6787,7 +6787,12 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb) return queue; } -static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc) +/* + * Despite newer chips 8723b/8812/8821 having a larger TX descriptor + * format. The descriptor checksum is still only calculated over the + * initial 32 bytes of the descriptor! 
+ */ +static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc) { __le16 *ptr = (__le16 *)tx_desc; u16 csum = 0; @@ -6799,7 +6804,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_tx_desc *tx_desc) */ tx_desc->csum = cpu_to_le16(0); - for (i = 0; i < (sizeof(struct rtl8xxxu_tx_desc) / sizeof(u16)); i++) + for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++) csum = csum ^ le16_to_cpu(ptr[i]); tx_desc->csum |= cpu_to_le16(csum); @@ -6868,13 +6873,15 @@ static void rtl8xxxu_tx_complete(struct urb *urb) struct sk_buff *skb = (struct sk_buff *)urb->context; struct ieee80211_tx_info *tx_info; struct ieee80211_hw *hw; + struct rtl8xxxu_priv *priv; struct rtl8xxxu_tx_urb *tx_urb = container_of(urb, struct rtl8xxxu_tx_urb, urb); tx_info = IEEE80211_SKB_CB(skb); hw = tx_info->rate_driver_data[0]; + priv = hw->priv; - skb_pull(skb, sizeof(struct rtl8xxxu_tx_desc)); + skb_pull(skb, priv->fops->tx_desc_size); ieee80211_tx_info_clear_status(tx_info); tx_info->status.rates[0].idx = -1; @@ -6885,7 +6892,7 @@ static void rtl8xxxu_tx_complete(struct urb *urb) ieee80211_tx_status_irqsafe(hw, skb); - rtl8xxxu_free_tx_urb(hw->priv, tx_urb); + rtl8xxxu_free_tx_urb(priv, tx_urb); } static void rtl8xxxu_dump_action(struct device *dev, @@ -6935,7 +6942,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); struct rtl8xxxu_priv *priv = hw->priv; - struct rtl8xxxu_tx_desc *tx_desc; + struct rtl8723au_tx_desc *tx_desc; struct rtl8xxxu_tx_urb *tx_urb; struct ieee80211_sta *sta = NULL; struct ieee80211_vif *vif = tx_info->control.vif; @@ -6944,16 +6951,17 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, u16 pktlen = skb->len; u16 seq_number; u16 rate_flag = tx_info->control.rates[0].flags; + int tx_desc_size = priv->fops->tx_desc_size; int ret; - if (skb_headroom(skb) < sizeof(struct rtl8xxxu_tx_desc)) { + if (skb_headroom(skb) < tx_desc_size) { dev_warn(dev, "%s: Not enough headroom (%i) for tx descriptor\n", __func__, skb_headroom(skb)); goto error; } - if (unlikely(skb->len > (65535 - sizeof(struct rtl8xxxu_tx_desc)))) { + if (unlikely(skb->len > (65535 - tx_desc_size))) { dev_warn(dev, "%s: Trying to send over-sized skb (%i)\n", __func__, skb->len); goto error; @@ -6977,12 +6985,11 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (control && control->sta) sta = control->sta; - tx_desc = (struct rtl8xxxu_tx_desc *) - skb_push(skb, sizeof(struct rtl8xxxu_tx_desc)); + tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size); - memset(tx_desc, 0, sizeof(struct rtl8xxxu_tx_desc)); + memset(tx_desc, 0, tx_desc_size); tx_desc->pkt_size = cpu_to_le16(pktlen); - tx_desc->pkt_offset = sizeof(struct rtl8xxxu_tx_desc); + tx_desc->pkt_offset = tx_desc_size; tx_desc->txdw0 = TXDESC_OWN | TXDESC_FIRST_SEGMENT | TXDESC_LAST_SEGMENT; @@ -8077,7 +8084,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface, SET_IEEE80211_DEV(priv->hw, &interface->dev); SET_IEEE80211_PERM_ADDR(hw, priv->mac_addr); - hw->extra_tx_headroom = sizeof(struct rtl8xxxu_tx_desc); + hw->extra_tx_headroom = priv->fops->tx_desc_size; ieee80211_hw_set(hw, SIGNAL_DBM); /* * The firmware handles rate control @@ -8134,6 +8141,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, + .tx_desc_size = sizeof(struct rtl8723au_tx_desc), .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, 
.adda_2t_path_on_a = 0x04db25a4, @@ -8157,6 +8165,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, + .tx_desc_size = sizeof(struct rtl8723bu_tx_desc), .has_s0s1 = 1, .adda_1t_init = 0x01c00014, .adda_1t_path_on = 0x01c00014, @@ -8179,6 +8188,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, + .tx_desc_size = sizeof(struct rtl8723au_tx_desc), .adda_1t_init = 0x0b1b25a0, .adda_1t_path_on = 0x0bdb25a0, .adda_2t_path_on_a = 0x04db25a4, @@ -8200,6 +8210,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, + .tx_desc_size = sizeof(struct rtl8723au_tx_desc), .has_s0s1 = 1, .adda_1t_init = 0x0fc01616, .adda_1t_path_on = 0x0fc01616, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index f06b88b97bee..d53912a8326d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -332,7 +332,7 @@ struct rtl8723bu_rx_desc { __le32 tsfl; }; -struct rtl8xxxu_tx_desc { +struct rtl8723au_tx_desc { __le16 pkt_size; u8 pkt_offset; u8 txdw0; @@ -1190,6 +1190,7 @@ struct rtl8xxxu_fileops { int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; + char tx_desc_size; char has_s0s1; u32 adda_1t_init; u32 adda_1t_path_on; -- cgit v1.2.3 From 5e00d5034abf816aa1ca5033ed7c0d6ff04cba87 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:28 -0500 Subject: rtl8xxxu: Do not unconditionally print debug info in rtl8723bu_handle_c2h() Reduce the log level in rtl8723bu_handle_c2h() Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 19bc5d7556bc..49fb5bd2950d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7296,29 +7296,29 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, len = skb->len - 2; - dev_info(dev, "C2H ID %02x seq %02x, len %02x source %02x\n", - c2h->id, c2h->seq, len, c2h->bt_info.response_source); + dev_dbg(dev, "C2H ID %02x seq %02x, len %02x source %02x\n", + c2h->id, c2h->seq, len, c2h->bt_info.response_source); switch(c2h->id) { case C2H_8723B_BT_INFO: if (c2h->bt_info.response_source > BT_INFO_SRC_8723B_BT_ACTIVE_SEND) - dev_info(dev, "C2H_BT_INFO WiFi only firmware\n"); + dev_dbg(dev, "C2H_BT_INFO WiFi only firmware\n"); else - dev_info(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n"); + dev_dbg(dev, "C2H_BT_INFO BT/WiFi coexist firmware\n"); if (c2h->bt_info.bt_has_reset) - dev_info(dev, "BT has been reset\n"); + dev_dbg(dev, "BT has been reset\n"); if (c2h->bt_info.tx_rx_mask) - dev_info(dev, "BT TRx mask\n"); + dev_dbg(dev, "BT TRx mask\n"); break; case C2H_8723B_BT_MP_INFO: - dev_info(dev, "C2H_MP_INFO ext ID %02x, status %02x\n", - c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); + dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n", + c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); break; default: - pr_info("%s: Unhandled C2H event %02x\n", __func__, c2h->id); + dev_info(dev, "Unhandled C2H event %02x\n", c2h->id); break; } } -- cgit v1.2.3 From 
0249258db457b1279f40c7adf46bfb2eea817884 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:29 -0500 Subject: rtl8xxxu: Add additional tx descriptor bits for data word 0 This adds documentation for some additional bits in TX descriptor word 0. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index d53912a8326d..9364a31be9bf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -407,8 +407,12 @@ struct rtl8723bu_tx_desc { #define TXDESC_OWN BIT(31) #else #define TXDESC_BROADMULTICAST BIT(0) +#define TXDESC_HTC BIT(1) #define TXDESC_LAST_SEGMENT BIT(2) #define TXDESC_FIRST_SEGMENT BIT(3) +#define TXDESC_LINIP BIT(4) +#define TXDESC_NO_ACM BIT(5) +#define TXDESC_GF BIT(6) #define TXDESC_OWN BIT(7) #endif -- cgit v1.2.3 From ce2d1dbbb4b7b72cfe64d07fbd282f472ea51bbd Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:30 -0500 Subject: rtl8xxxu: Add more 40 byte TX desc bit definitions Add additional bit definitions for 40 byte TX descriptors, and rename bits for 32 byte descriptors that are located differently in the 40 byte descriptor format. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 7 +++--- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 27 +++++++++++++++++++++--- 2 files changed, 28 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 49fb5bd2950d..4cff2c94c88f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7035,11 +7035,12 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, ampdu = (u32)sta->ht_cap.ampdu_density; val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT; tx_desc->txdw2 |= cpu_to_le32(val32); - tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE); + + tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A); } else - tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK); + tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); } else - tx_desc->txdw1 |= cpu_to_le32(TXDESC_BK); + tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); if (ieee80211_is_data_qos(hdr->frame_control)) tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 9364a31be9bf..6ba12ab9ba4e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -417,9 +417,15 @@ struct rtl8723bu_tx_desc { #endif /* Word 1 */ +/* + * Bits 0-7 differ dependent on chip generation. For 8723au bits 5/6 are + * aggregation enable and break respectively. For 8723bu, bits 0-7 are macid. 
+ */ #define TXDESC_PKT_OFFSET_SZ 0 -#define TXDESC_AGG_ENABLE BIT(5) -#define TXDESC_BK BIT(6) +#define TXDESC_AGG_ENABLE_8723A BIT(5) +#define TXDESC_AGG_BREAK_8723A BIT(6) +#define TXDESC_MACID_SHIFT_8723B 0 +#define TXDESC_MACID_MASK_8723B 0x00f0 #define TXDESC_QUEUE_SHIFT 8 #define TXDESC_QUEUE_MASK 0x1f00 #define TXDESC_QUEUE_BK 0x2 @@ -431,6 +437,9 @@ struct rtl8723bu_tx_desc { #define TXDESC_QUEUE_MGNT 0x12 #define TXDESC_QUEUE_CMD 0x13 #define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1) +#define TXDESC_RDG_NAV_EXT_8723B BIT(13) +#define TXDESC_LSIG_TXOP_ENABLE_8723B BIT(14) +#define TXDESC_PIFS_8723B BIT(15) #define DESC_RATE_ID_SHIFT 16 #define DESC_RATE_ID_MASK 0xf @@ -442,8 +451,20 @@ struct rtl8723bu_tx_desc { #define TXDESC_HWPC BIT(31) /* Word 2 */ -#define TXDESC_ACK_REPORT BIT(19) +#define TXDESC_PAID_SHIFT_8723B 0 +#define TXDESC_PAID_MASK_8723B 0x1ff +#define TXDESC_CCA_RTS_SHIFT_8723B 10 +#define TXDESC_CCA_RTS_MASK_8723B 0xc00 +#define TXDESC_AGG_ENABLE_8723B BIT(12) +#define TXDESC_RDG_ENABLE_8723B BIT(13) +#define TXDESC_AGG_BREAK_8723B BIT(16) +#define TXDESC_MORE_FRAG_8723B BIT(17) +#define TXDESC_RAW_8723B BIT(18) +#define TXDESC_ACK_REPORT_8723A BIT(19) +#define TXDESC_SPE_RPT_8723B BIT(19) #define TXDESC_AMPDU_DENSITY_SHIFT 20 +#define TXDESC_BT_INT_8723B BIT(23) +#define TXDESC_GID_8723B BIT(24) /* Word 3 */ #define TXDESC_SEQ_SHIFT 16 -- cgit v1.2.3 From a40ace4f01c77362553455a819eba407f7fccc76 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:31 -0500 Subject: rtl8xxxu: Set the correct TX descriptor bits for agg and break on 8723b Fixup victim of the relocated bits for AGG_ENABLE/AGG_BREAK in the 40 byte TX descriptor Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4cff2c94c88f..dd9addf7badf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6953,6 +6953,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, u16 rate_flag = tx_info->control.rates[0].flags; int tx_desc_size = priv->fops->tx_desc_size; int ret; + bool txdesc40, ampdu_enable; if (skb_headroom(skb) < tx_desc_size) { dev_warn(dev, @@ -6980,6 +6981,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (ieee80211_is_action(hdr->frame_control)) rtl8xxxu_dump_action(dev, hdr); + txdesc40 = (tx_desc_size == 40); tx_info->rate_driver_data[0] = hw; if (control && control->sta) @@ -7028,6 +7030,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, tx_desc->txdw5 |= cpu_to_le32(0x0001ff00); /* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */ + ampdu_enable = false; if (ieee80211_is_data_qos(hdr->frame_control) && sta) { if (sta->ht_cap.ht_supported) { u32 ampdu, val32; @@ -7036,11 +7039,21 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, val32 = ampdu << TXDESC_AMPDU_DENSITY_SHIFT; tx_desc->txdw2 |= cpu_to_le32(val32); + ampdu_enable = true; + } + } + + if (!txdesc40) { + if (ampdu_enable) tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A); - } else + else tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); - } else - tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); + } else { + if (ampdu_enable) + tx_desc->txdw2 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723B); + else + tx_desc->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B); + }; if 
(ieee80211_is_data_qos(hdr->frame_control)) tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS); -- cgit v1.2.3 From cc2646d4bed4f85bbb72871717f86b625677a874 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:32 -0500 Subject: rtl8xxxu: Set sequence number correctly for 40 byte TX descriptors SEQ changed location in the 40 byte TX descriptor. Set it correctly. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 24 ++++++++++++++++-------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 8 ++++++-- 2 files changed, 22 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index dd9addf7badf..968eac70163d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6943,6 +6943,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info); struct rtl8xxxu_priv *priv = hw->priv; struct rtl8723au_tx_desc *tx_desc; + struct rtl8723bu_tx_desc *tx_desc40; struct rtl8xxxu_tx_urb *tx_urb; struct ieee80211_sta *sta = NULL; struct ieee80211_vif *vif = tx_info->control.vif; @@ -6953,7 +6954,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, u16 rate_flag = tx_info->control.rates[0].flags; int tx_desc_size = priv->fops->tx_desc_size; int ret; - bool txdesc40, ampdu_enable; + bool usedesc40, ampdu_enable; if (skb_headroom(skb) < tx_desc_size) { dev_warn(dev, @@ -6981,7 +6982,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (ieee80211_is_action(hdr->frame_control)) rtl8xxxu_dump_action(dev, hdr); - txdesc40 = (tx_desc_size == 40); + usedesc40 = (tx_desc_size == 40); tx_info->rate_driver_data[0] = hw; if (control && control->sta) @@ -7017,9 +7018,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, } } - seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT); - if (rate_flag & IEEE80211_TX_RC_MCS) rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; else @@ -7043,16 +7041,26 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, } } - if (!txdesc40) { + seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); + if (!usedesc40) { + tx_desc->txdw3 = + cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A); + if (ampdu_enable) tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A); else tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); } else { + tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc; + + tx_desc40->txdw9 = + cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B); + if (ampdu_enable) - tx_desc->txdw2 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723B); + tx_desc40->txdw2 |= + cpu_to_le32(TXDESC_AGG_ENABLE_8723B); else - tx_desc->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B); + tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B); }; if (ieee80211_is_data_qos(hdr->frame_control)) diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 6ba12ab9ba4e..8f95896f4ded 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -467,8 +467,8 @@ struct rtl8723bu_tx_desc { #define TXDESC_GID_8723B BIT(24) /* Word 3 */ -#define TXDESC_SEQ_SHIFT 16 -#define TXDESC_SEQ_MASK 0x0fff0000 +#define TXDESC_SEQ_SHIFT_8723A 16 +#define TXDESC_SEQ_MASK_8723A 0x0fff0000 /* Word 4 */ #define TXDESC_QOS BIT(6) @@ -498,6 
+498,10 @@ struct rtl8723bu_tx_desc { /* Word 6 */ #define TXDESC_MAX_AGG_SHIFT 11 +/* Word 9 */ +#define TXDESC_SEQ_SHIFT_8723B 12 +#define TXDESC_SEQ_MASK_8723B 0x00fff000 + struct phy_rx_agc_info { #ifdef __LITTLE_ENDIAN u8 gain:7, trsw:1; -- cgit v1.2.3 From 2098bfb5f3277b12dc2f9dc61efd57161c000f77 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:33 -0500 Subject: rtl8723au: Update TX descriptor words 4 and 5 definitions TX data words 4 and 5 differ significantly between 32 byte and 40 byte descriptors. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 11 ++++---- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 32 ++++++++++++++++-------- 2 files changed, 28 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 968eac70163d..2fa587e6d24d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7076,16 +7076,17 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, } if (ieee80211_is_mgmt(hdr->frame_control)) { tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value); - tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE); - tx_desc->txdw5 |= cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT); - tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE); + tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A); + tx_desc->txdw5 |= + cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT_8723A); + tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A); } if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { /* Use RTS rate 24M - does the mac80211 tell us which to use? */ tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M); - tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE); - tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE); + tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A); + tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A); } rtl8xxxu_calc_tx_desc_csum(tx_desc); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 8f95896f4ded..5f60f190104d 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -467,17 +467,21 @@ struct rtl8723bu_tx_desc { #define TXDESC_GID_8723B BIT(24) /* Word 3 */ +#define TXDESC_USE_DRIVER_RATE_8723B BIT(8) +#define TXDESC_CTS_SELF_ENABLE_8723B BIT(11) +#define TXDESC_RTS_CTS_ENABLE_8723B BIT(12) +#define TXDESC_HW_RTS_ENABLE_8723B BIT(13) #define TXDESC_SEQ_SHIFT_8723A 16 #define TXDESC_SEQ_MASK_8723A 0x0fff0000 /* Word 4 */ #define TXDESC_QOS BIT(6) -#define TXDESC_HW_SEQ_ENABLE BIT(7) -#define TXDESC_USE_DRIVER_RATE BIT(8) +#define TXDESC_HW_SEQ_ENABLE_8723A BIT(7) +#define TXDESC_USE_DRIVER_RATE_8723A BIT(8) #define TXDESC_DISABLE_DATA_FB BIT(10) -#define TXDESC_CTS_SELF_ENABLE BIT(11) -#define TXDESC_RTS_CTS_ENABLE BIT(12) -#define TXDESC_HW_RTS_ENABLE BIT(13) +#define TXDESC_CTS_SELF_ENABLE_8723A BIT(11) +#define TXDESC_RTS_CTS_ENABLE_8723A BIT(12) +#define TXDESC_HW_RTS_ENABLE_8723A BIT(13) #define TXDESC_PRIME_CH_OFF_LOWER BIT(20) #define TXDESC_PRIME_CH_OFF_UPPER BIT(21) #define TXDESC_SHORT_PREAMBLE BIT(24) @@ -485,19 +489,27 @@ struct rtl8723bu_tx_desc { #define TXDESC_RTS_DATA_BW BIT(27) #define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28) #define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29) +#define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17) +#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18 
+#define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000 +#define TXDESC_RTS_RATE_SHIFT_8723B 24 +#define TXDESC_RTS_RATE_MASK_8723B 0x3f000000 /* Word 5 */ -#define TXDESC_RTS_RATE_SHIFT 0 -#define TXDESC_RTS_RATE_MASK 0x3f +#define TXDESC_RTS_RATE_SHIFT_8723A 0 +#define TXDESC_RTS_RATE_MASK_8723A 0x3f #define TXDESC_SHORT_GI BIT(6) #define TXDESC_CCX_TAG BIT(7) -#define TXDESC_RETRY_LIMIT_ENABLE BIT(17) -#define TXDESC_RETRY_LIMIT_SHIFT 18 -#define TXDESC_RETRY_LIMIT_MASK 0x00fc0000 +#define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17) +#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18 +#define TXDESC_RETRY_LIMIT_MASK_8723A 0x00fc0000 /* Word 6 */ #define TXDESC_MAX_AGG_SHIFT 11 +/* Word 8 */ +#define TXDESC_HW_SEQ_ENABLE_8723B BIT(15) + /* Word 9 */ #define TXDESC_SEQ_SHIFT_8723B 12 #define TXDESC_SEQ_MASK_8723B 0x00fff000 -- cgit v1.2.3 From 2c6670b2a8e6dcd17bedd4dfbac663b2aa3d6949 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:34 -0500 Subject: rtl8xxxu: TX RTS rate is word 4 for 8723a Correct the setting of TX RTS for 8723a generation chips. In addition update documentation to match that this is part of data word 4, note data word 5. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 3 ++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 2fa587e6d24d..4fbf44562d0e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7084,7 +7084,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { /* Use RTS rate 24M - does the mac80211 tell us which to use? */ - tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M); + tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M << + TXDESC_RTS_RATE_SHIFT_8723A); tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A); tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A); } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 5f60f190104d..b015c75f4841 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -475,6 +475,8 @@ struct rtl8723bu_tx_desc { #define TXDESC_SEQ_MASK_8723A 0x0fff0000 /* Word 4 */ +#define TXDESC_RTS_RATE_SHIFT_8723A 0 +#define TXDESC_RTS_RATE_MASK_8723A 0x3f #define TXDESC_QOS BIT(6) #define TXDESC_HW_SEQ_ENABLE_8723A BIT(7) #define TXDESC_USE_DRIVER_RATE_8723A BIT(8) @@ -496,8 +498,6 @@ struct rtl8723bu_tx_desc { #define TXDESC_RTS_RATE_MASK_8723B 0x3f000000 /* Word 5 */ -#define TXDESC_RTS_RATE_SHIFT_8723A 0 -#define TXDESC_RTS_RATE_MASK_8723A 0x3f #define TXDESC_SHORT_GI BIT(6) #define TXDESC_CCX_TAG BIT(7) #define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17) -- cgit v1.2.3 From 4c683607145d22c872d6b473c5f5465f8459c5de Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:35 -0500 Subject: rtl8xxxu: Improve handling of txdesc32 vs txdesc40 handling Further correct the handling of 40 byte TX descriptors. 
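For reference, the core of this change is that the 32 byte (8723a) and 40 byte (8723b) descriptor layouts keep the sequence number (and the aggregation/rate/RTS fields) in different words with different shifts, so the TX path branches once on the descriptor size and fills the generation-specific fields. Below is a minimal standalone sketch of that idea only; it uses simplified stand-in types and a hypothetical helper name, not the driver's little-endian descriptor structs, with the shift values taken from the TXDESC_SEQ_SHIFT_8723A/_8723B definitions above.

        /*
         * Illustrative sketch, not driver code: pick the word and shift for
         * the sequence number based on which descriptor format is in use.
         */
        #include <stdint.h>
        #include <stdbool.h>

        #define SEQ_SHIFT_32    16      /* word 3 on the 32 byte (8723a) layout */
        #define SEQ_SHIFT_40    12      /* word 9 on the 40 byte (8723b) layout */

        struct txdesc32 { uint32_t w[8]; };    /* simplified stand-ins */
        struct txdesc40 { uint32_t w[10]; };

        static void fill_seq(void *desc, bool use40, uint16_t seq)
        {
                if (!use40) {
                        struct txdesc32 *d = desc;
                        d->w[3] |= (uint32_t)seq << SEQ_SHIFT_32;
                } else {
                        struct txdesc40 *d = desc;
                        d->w[9] |= (uint32_t)seq << SEQ_SHIFT_40;
                }
        }

The same single branch is what the patch below uses to select the generation-specific aggregation, rate and RTS fields as well.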
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 123 ++++++++++++++++------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 7 +- 2 files changed, 92 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4fbf44562d0e..927fd3b13ae3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7018,15 +7018,6 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, } } - if (rate_flag & IEEE80211_TX_RC_MCS) - rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; - else - rate = tx_rate->hw_value; - tx_desc->txdw5 = cpu_to_le32(rate); - - if (ieee80211_is_data(hdr->frame_control)) - tx_desc->txdw5 |= cpu_to_le32(0x0001ff00); - /* (tx_info->flags & IEEE80211_TX_CTL_AMPDU) && */ ampdu_enable = false; if (ieee80211_is_data_qos(hdr->frame_control) && sta) { @@ -7041,8 +7032,18 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, } } + if (rate_flag & IEEE80211_TX_RC_MCS) + rate = tx_info->control.rates[0].idx + DESC_RATE_MCS0; + else + rate = tx_rate->hw_value; + seq_number = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); if (!usedesc40) { + tx_desc->txdw5 = cpu_to_le32(rate); + + if (ieee80211_is_data(hdr->frame_control)) + tx_desc->txdw5 |= cpu_to_le32(0x0001ff00); + tx_desc->txdw3 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A); @@ -7050,9 +7051,56 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A); else tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A); + + if (ieee80211_is_mgmt(hdr->frame_control)) { + tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value); + tx_desc->txdw4 |= + cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A); + tx_desc->txdw5 |= + cpu_to_le32(6 << + TXDESC_RETRY_LIMIT_SHIFT_8723A); + tx_desc->txdw5 |= + cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A); + } + + if (ieee80211_is_data_qos(hdr->frame_control)) + tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A); + + if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || + (sta && vif && vif->bss_conf.use_short_preamble)) + tx_desc->txdw4 |= + cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A); + + if (rate_flag & IEEE80211_TX_RC_SHORT_GI || + (ieee80211_is_data_qos(hdr->frame_control) && + sta && sta->ht_cap.cap & + (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) { + tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI); + } + + if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { + /* + * Use RTS rate 24M - does the mac80211 tell + * us which to use? 
+ */ + tx_desc->txdw4 |= + cpu_to_le32(DESC_RATE_24M << + TXDESC_RTS_RATE_SHIFT_8723A); + tx_desc->txdw4 |= + cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A); + tx_desc->txdw4 |= + cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A); + } } else { tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc; + tx_desc40->txdw4 = cpu_to_le32(rate); + if (ieee80211_is_data(hdr->frame_control)) { + tx_desc->txdw4 |= + cpu_to_le32(0x1f << + TXDESC_DATA_RATE_FB_SHIFT_8723B); + } + tx_desc40->txdw9 = cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B); @@ -7061,34 +7109,37 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, cpu_to_le32(TXDESC_AGG_ENABLE_8723B); else tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B); - }; - if (ieee80211_is_data_qos(hdr->frame_control)) - tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS); - if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || - (sta && vif && vif->bss_conf.use_short_preamble)) - tx_desc->txdw4 |= cpu_to_le32(TXDESC_SHORT_PREAMBLE); - if (rate_flag & IEEE80211_TX_RC_SHORT_GI || - (ieee80211_is_data_qos(hdr->frame_control) && - sta && sta->ht_cap.cap & - (IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) { - tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI); - } - if (ieee80211_is_mgmt(hdr->frame_control)) { - tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value); - tx_desc->txdw4 |= cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A); - tx_desc->txdw5 |= - cpu_to_le32(6 << TXDESC_RETRY_LIMIT_SHIFT_8723A); - tx_desc->txdw5 |= cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A); - } - - if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { - /* Use RTS rate 24M - does the mac80211 tell us which to use? */ - tx_desc->txdw4 |= cpu_to_le32(DESC_RATE_24M << - TXDESC_RTS_RATE_SHIFT_8723A); - tx_desc->txdw4 |= cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A); - tx_desc->txdw4 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A); - } + if (ieee80211_is_mgmt(hdr->frame_control)) { + tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value); + tx_desc40->txdw3 |= + cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B); + tx_desc40->txdw4 |= + cpu_to_le32(6 << + TXDESC_RETRY_LIMIT_SHIFT_8723B); + tx_desc40->txdw4 |= + cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B); + } + + if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE || + (sta && vif && vif->bss_conf.use_short_preamble)) + tx_desc40->txdw5 |= + cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B); + + if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) { + /* + * Use RTS rate 24M - does the mac80211 tell + * us which to use? 
+ */ + tx_desc->txdw4 |= + cpu_to_le32(DESC_RATE_24M << + TXDESC_RTS_RATE_SHIFT_8723B); + tx_desc->txdw3 |= + cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B); + tx_desc->txdw3 |= + cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B); + } + }; rtl8xxxu_calc_tx_desc_csum(tx_desc); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index b015c75f4841..687b9a938170 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -477,7 +477,7 @@ struct rtl8723bu_tx_desc { /* Word 4 */ #define TXDESC_RTS_RATE_SHIFT_8723A 0 #define TXDESC_RTS_RATE_MASK_8723A 0x3f -#define TXDESC_QOS BIT(6) +#define TXDESC_QOS_8723A BIT(6) #define TXDESC_HW_SEQ_ENABLE_8723A BIT(7) #define TXDESC_USE_DRIVER_RATE_8723A BIT(8) #define TXDESC_DISABLE_DATA_FB BIT(10) @@ -486,11 +486,13 @@ struct rtl8723bu_tx_desc { #define TXDESC_HW_RTS_ENABLE_8723A BIT(13) #define TXDESC_PRIME_CH_OFF_LOWER BIT(20) #define TXDESC_PRIME_CH_OFF_UPPER BIT(21) -#define TXDESC_SHORT_PREAMBLE BIT(24) +#define TXDESC_SHORT_PREAMBLE_8723A BIT(24) #define TXDESC_DATA_BW BIT(25) #define TXDESC_RTS_DATA_BW BIT(27) #define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28) #define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29) +#define TXDESC_DATA_RATE_FB_SHIFT_8723B 8 +#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00 #define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17) #define TXDESC_RETRY_LIMIT_SHIFT_8723B 18 #define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000 @@ -498,6 +500,7 @@ struct rtl8723bu_tx_desc { #define TXDESC_RTS_RATE_MASK_8723B 0x3f000000 /* Word 5 */ +#define TXDESC_SHORT_PREAMBLE_8723B BIT(4) #define TXDESC_SHORT_GI BIT(6) #define TXDESC_CCX_TAG BIT(7) #define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17) -- cgit v1.2.3 From e975b87cafd24d943f395823b1a000d1389f48ef Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:36 -0500 Subject: rtl8xxxu: Do not parse RX descriptor info for C2H packets C2H events are delivered as RX packets on 8723bu/8192eu. When receiving a C2H event, do not parse the rest of the RX descriptor as the info isn't valid. 
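The essence of the change is an early return before any of the normal RX fields are read, since a C2H event reuses the RX buffer and the remaining descriptor fields are meaningless. A minimal standalone sketch of that pattern, assuming simplified stand-in types rather than the real rtl8723bu_rx_desc layout:

        #include <stdint.h>

        enum { RX_TYPE_DATA_PKT, RX_TYPE_C2H };

        struct rx_desc_sketch {
                uint32_t rpt_sel:1;     /* set when the packet carries a C2H event */
                uint32_t rxmcs:7;
        };

        static int parse_rx_desc_sketch(const struct rx_desc_sketch *desc)
        {
                /* C2H events reuse the buffer; the rest of the RX fields
                 * are not valid, so bail out before touching them. */
                if (desc->rpt_sel)
                        return RX_TYPE_C2H;

                /* ... normal parsing of rate, PHY stats, timestamp ... */
                return RX_TYPE_DATA_PKT;
        }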
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 927fd3b13ae3..76793e9a9542 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7324,7 +7324,6 @@ static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, (struct rtl8723bu_rx_desc *)skb->data; struct rtl8723au_phy_stats *phy_stats; int drvinfo_sz, desc_shift; - int rx_type; skb_pull(skb, sizeof(struct rtl8723bu_rx_desc)); @@ -7334,6 +7333,12 @@ static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, desc_shift = rx_desc->shift; skb_pull(skb, drvinfo_sz + desc_shift); + if (rx_desc->rpt_sel) { + struct device *dev = &priv->udev->dev; + dev_dbg(dev, "%s: C2H packet\n", __func__); + return RX_TYPE_C2H; + } + rx_status->mactime = le32_to_cpu(rx_desc->tsfl); rx_status->flag |= RX_FLAG_MACTIME_START; @@ -7351,15 +7356,7 @@ static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, rx_status->rate_idx = rx_desc->rxmcs; } - if (rx_desc->rpt_sel) { - struct device *dev = &priv->udev->dev; - dev_dbg(dev, "%s: C2H packet\n", __func__); - rx_type = RX_TYPE_C2H; - } else { - rx_type = RX_TYPE_DATA_PKT; - } - - return rx_type; + return RX_TYPE_DATA_PKT; } static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, -- cgit v1.2.3 From 80b30b2af59af9400568af05b8b409e5a20da930 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:37 -0500 Subject: rtl8xxxu: Define 8723b H2C ramask command structure Define H2C command structure for setting the rate mask. 
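The 8723b H2C format carries the 32-bit rate mask one byte at a time, so filling the command amounts to splitting the mask into its four bytes. A standalone sketch of that packing, using a simplified stand-in structure rather than the driver's h2c_cmd union:

        #include <stdint.h>

        struct ramask_cmd_sketch {
                uint8_t cmd;
                uint8_t data1;          /* RAID / SGI bits */
                uint8_t data2;          /* bandwidth etc. */
                uint8_t ramask[4];      /* rate mask, least significant byte first */
        };

        static void fill_ramask(struct ramask_cmd_sketch *c, uint32_t ramask)
        {
                c->ramask[0] = ramask & 0xff;
                c->ramask[1] = (ramask >> 8) & 0xff;
                c->ramask[2] = (ramask >> 16) & 0xff;
                c->ramask[3] = (ramask >> 24) & 0xff;
        }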
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 687b9a938170..37657fee242a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -893,7 +893,7 @@ enum h2c_cmd_8723b { /* * Dynamic Mechanism Class: 010 */ - H2C_8723B_MACID_CFG = 0x40, + H2C_8723B_MACID_CFG_RAID = 0x40, H2C_8723B_TXBF = 0x41, H2C_8723B_RSSI_SETTING = 0x42, H2C_8723B_AP_REQ_TXRPT = 0x43, @@ -956,6 +956,15 @@ struct h2c_cmd { u8 arg; __le16 mask_lo; } __packed ramask; + struct { + u8 cmd; + u8 data1; + u8 data2; + u8 ramask1; + u8 ramask2; + u8 ramask3; + u8 ramask4; + } __packed b_macid_cfg; struct { u8 cmd; u8 data1; -- cgit v1.2.3 From f653e69009c633d099b34f7473b037c570c256f8 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:38 -0500 Subject: rtl8xxxu: Implement basic 8723b specific update_rate_mask() function Support for setting bandwidth and VHT parameters is still missing Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 38 ++++++++++++++++++++++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 16 +++++++++- 2 files changed, 50 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 76793e9a9542..5486ad70999f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6609,11 +6609,13 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw, rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); } -static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, - u32 ramask, int sgi) +static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv, + u32 ramask, int sgi) { struct h2c_cmd h2c; + memset(&h2c, 0, sizeof(struct h2c_cmd)); + h2c.ramask.cmd = H2C_SET_RATE_MASK; h2c.ramask.mask_lo = cpu_to_le16(ramask & 0xffff); h2c.ramask.mask_hi = cpu_to_le16(ramask >> 16); @@ -6627,6 +6629,32 @@ static void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask)); } +static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv, + u32 ramask, int sgi) +{ + struct h2c_cmd h2c; + u8 bw = 0; + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + + h2c.b_macid_cfg.cmd = H2C_8723B_MACID_CFG_RAID; + h2c.b_macid_cfg.ramask0 = ramask & 0xff; + h2c.b_macid_cfg.ramask1 = (ramask >> 8) & 0xff; + h2c.b_macid_cfg.ramask2 = (ramask >> 16) & 0xff; + h2c.b_macid_cfg.ramask3 = (ramask >> 24) & 0xff; + + h2c.ramask.arg = 0x80; + h2c.b_macid_cfg.data1 = 0; + if (sgi) + h2c.b_macid_cfg.data1 |= BIT(7); + + h2c.b_macid_cfg.data2 = bw; + + dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zi\n", + __func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg)); + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg)); +} + static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) { u32 val32; @@ -6693,7 +6721,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, sgi = 1; rcu_read_unlock(); - rtl8xxxu_update_rate_mask(priv, ramask, sgi); + priv->fops->update_rate_mask(priv, ramask, sgi); rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff); @@ -8210,6 +8238,7 @@ static 
struct rtl8xxxu_fileops rtl8723au_fops = { .parse_rx_desc = rtl8723au_parse_rx_desc, .enable_rf = rtl8723a_enable_rf, .set_tx_power = rtl8723a_set_tx_power, + .update_rate_mask = rtl8723au_update_rate_mask, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8234,6 +8263,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_statistics = rtl8723bu_init_statistics, .enable_rf = rtl8723b_enable_rf, .set_tx_power = rtl8723b_set_tx_power, + .update_rate_mask = rtl8723bu_update_rate_mask, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -8257,6 +8287,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .parse_rx_desc = rtl8723au_parse_rx_desc, .enable_rf = rtl8723a_enable_rf, .set_tx_power = rtl8723a_set_tx_power, + .update_rate_mask = rtl8723au_update_rate_mask, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8279,6 +8310,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_rx_desc = rtl8723bu_parse_rx_desc, .enable_rf = rtl8723b_enable_rf, .set_tx_power = rtl8723b_set_tx_power, + .update_rate_mask = rtl8723au_update_rate_mask, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 37657fee242a..eea62f1a6db7 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -958,12 +958,24 @@ struct h2c_cmd { } __packed ramask; struct { u8 cmd; + u8 macid; + /* + * [0:4] - RAID + * [7] - SGI + */ u8 data1; + /* + * [0:1] - Bandwidth + * [3] - No Update + * [4:5] - VHT enable + * [6] - DISPT + * [7] - DISRA + */ u8 data2; + u8 ramask0; u8 ramask1; u8 ramask2; u8 ramask3; - u8 ramask4; } __packed b_macid_cfg; struct { u8 cmd; @@ -1240,6 +1252,8 @@ struct rtl8xxxu_fileops { void (*enable_rf) (struct rtl8xxxu_priv *priv); void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel, bool ht40); + void (*update_rate_mask) (struct rtl8xxxu_priv *priv, + u32 ramask, int sgi); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; -- cgit v1.2.3 From 7d794eaa857e9475e8b02f5a53100cad569194e3 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:39 -0500 Subject: rtl8xxxu: Report media status using the correct H2C command for 8723bu Implement support for nextgen devices reporting connectition to the firmware. The H2C API for reporting connection to the firmware is different between the two device generations. Use the fileops structure to determine which one to call. 
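The mechanism is the usual per-chip ops table: each generation registers its own report_connect() callback and common code only ever calls through the pointer, so it never needs to know which H2C format applies. A minimal standalone sketch of that dispatch pattern, with simplified stand-in type and function names:

        #include <stdbool.h>
        #include <stdint.h>

        struct chip_priv_sketch;

        struct chip_ops_sketch {
                void (*report_connect)(struct chip_priv_sketch *priv,
                                       uint8_t macid, bool connect);
        };

        struct chip_priv_sketch {
                const struct chip_ops_sketch *fops;
        };

        static void on_assoc_change(struct chip_priv_sketch *priv, bool assoc)
        {
                /* Common code stays format-agnostic. */
                priv->fops->report_connect(priv, 0, assoc);
        }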
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 46 ++++++++++++++++++++---- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 8 +++++ 2 files changed, 47 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5486ad70999f..3441361df981 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6655,6 +6655,39 @@ static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv, rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg)); } +static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv, + u8 macid, bool connect) +{ + struct h2c_cmd h2c; + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + + h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT; + + if (connect) + h2c.joinbss.data = H2C_JOIN_BSS_CONNECT; + else + h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT; + + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss)); +} + +static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv, + u8 macid, bool connect) +{ + struct h2c_cmd h2c; + + memset(&h2c, 0, sizeof(struct h2c_cmd)); + + h2c.media_status_rpt.cmd = H2C_8723B_MEDIA_STATUS_RPT; + if (connect) + h2c.media_status_rpt.parm |= BIT(0); + else + h2c.media_status_rpt.parm &= ~BIT(0); + + rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); +} + static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg) { u32 val32; @@ -6687,11 +6720,8 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u8 val8; if (changed & BSS_CHANGED_ASSOC) { - struct h2c_cmd h2c; - dev_dbg(dev, "Changed ASSOC: %i!\n", bss_conf->assoc); - memset(&h2c, 0, sizeof(struct h2c_cmd)); rtl8xxxu_set_linktype(priv, vif->type); if (bss_conf->assoc) { @@ -6731,16 +6761,14 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, rtl8xxxu_write16(priv, REG_BCN_PSR_RPT, 0xc000 | bss_conf->aid); - h2c.joinbss.data = H2C_JOIN_BSS_CONNECT; + priv->fops->report_connect(priv, 0, true); } else { val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL); val8 |= BEACON_DISABLE_TSF_UPDATE; rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8); - h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT; + priv->fops->report_connect(priv, 0, false); } - h2c.joinbss.cmd = H2C_JOIN_BSS_REPORT; - rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss)); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { @@ -8239,6 +8267,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .enable_rf = rtl8723a_enable_rf, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, + .report_connect = rtl8723au_report_connect, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8264,6 +8293,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .enable_rf = rtl8723b_enable_rf, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8723bu_update_rate_mask, + .report_connect = rtl8723bu_report_connect, .writeN_block_size = 1024, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, @@ -8288,6 +8318,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .enable_rf = rtl8723a_enable_rf, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, + .report_connect = rtl8723au_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT_0, .mbox_ext_width = 2, @@ -8311,6 +8342,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .enable_rf = 
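Both of these changes hang off the same C2H dispatch switch: known event IDs get their own case, and the default case dumps the payload so unexpected firmware events remain visible. A standalone userspace approximation of the shape of that switch (illustrative IDs, plain printf instead of the kernel's dev_* and print_hex_dump helpers):

        #include <stdio.h>
        #include <stdint.h>
        #include <stddef.h>

        enum { C2H_BT_INFO = 9, C2H_RA_REPORT = 0x0c };

        static void handle_c2h_sketch(uint8_t id, const uint8_t *payload, size_t len)
        {
                size_t i;

                switch (id) {
                case C2H_RA_REPORT:
                        printf("RA report, first byte %02x\n", len ? payload[0] : 0);
                        break;
                default:
                        /* Dump anything unrecognised for debugging. */
                        printf("Unhandled C2H event %02x:", id);
                        for (i = 0; i < len; i++)
                                printf(" %02x", payload[i]);
                        printf("\n");
                        break;
                }
        }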
rtl8723b_enable_rf, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, + .report_connect = rtl8723au_report_connect, .writeN_block_size = 128, .mbox_ext_reg = REG_HMBOX_EXT0_8723B, .mbox_ext_width = 4, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index eea62f1a6db7..2191b6bdb1fb 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -956,6 +956,12 @@ struct h2c_cmd { u8 arg; __le16 mask_lo; } __packed ramask; + struct { + u8 cmd; + u8 parm; + u8 macid; + u8 macid_end; + } __packed media_status_rpt; struct { u8 cmd; u8 macid; @@ -1254,6 +1260,8 @@ struct rtl8xxxu_fileops { bool ht40); void (*update_rate_mask) (struct rtl8xxxu_priv *priv, u32 ramask, int sgi); + void (*report_connect) (struct rtl8xxxu_priv *priv, + u8 macid, bool connect); int writeN_block_size; u16 mbox_ext_reg; char mbox_ext_width; -- cgit v1.2.3 From 739dc9f2f5c01dafc7336ffebd9d36bba4f9174e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:40 -0500 Subject: rtl8xxxu: Dump contents of unhandled C2H events Dump the contents of unhandled C2H events. We should be handling all expected events, so this is debugging help in case an unexpected event happens. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 3441361df981..07f4ee0c7cad 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7446,7 +7446,10 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); break; default: - dev_info(dev, "Unhandled C2H event %02x\n", c2h->id); + dev_info(dev, "Unhandled C2H event %02x seq %02x\n", + c2h->id, c2h->seq); + print_hex_dump(KERN_INFO, "C2H content: ", DUMP_PREFIX_NONE, + 16, 1, c2h->raw.payload, len, false); break; } } -- cgit v1.2.3 From 55a18dd18052eaef908efbb27d5c6d7e0569048f Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:41 -0500 Subject: rtl8xxxu: Process C2H RA_REPORT events for 8723bu Handle RA_REPORTS events for 8723bu to not have them show up as unhandled. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 6 ++++++ drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 11 +++++++++++ 2 files changed, 17 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 07f4ee0c7cad..4164b1a3dbd3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7445,6 +7445,12 @@ static void rtl8723bu_handle_c2h(struct rtl8xxxu_priv *priv, dev_dbg(dev, "C2H_MP_INFO ext ID %02x, status %02x\n", c2h->bt_mp_info.ext_id, c2h->bt_mp_info.status); break; + case C2H_8723B_RA_REPORT: + dev_dbg(dev, + "C2H RA RPT: rate %02x, unk %i, macid %02x, noise %i\n", + c2h->ra_report.rate, c2h->ra_report.dummy0_0, + c2h->ra_report.macid, c2h->ra_report.noisy_state); + break; default: dev_info(dev, "Unhandled C2H event %02x seq %02x\n", c2h->id, c2h->seq); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 2191b6bdb1fb..b44cb591bd36 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1033,6 +1033,7 @@ enum c2h_evt_8723b { C2H_8723B_BT_INFO = 9, C2H_8723B_HW_INFO_EXCH = 0x0a, C2H_8723B_BT_MP_INFO = 0x0b, + C2H_8723B_RA_REPORT = 0x0c, C2H_8723B_FW_DEBUG = 0xff, }; @@ -1121,6 +1122,16 @@ struct rtl8723bu_c2h { u8 a4; u8 a5; } __packed bt_info; + struct { + u8 rate:7; + u8 dummy0_0:1; + u8 macid; + u8 ldpc:1; + u8 txbf:1; + u8 noisy_state:1; + u8 dummy2_0:5; + u8 dummy3_0; + } __packed ra_report; }; }; -- cgit v1.2.3 From 87957081b749f3445fc6f10fae57280d45d6c6d5 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:42 -0500 Subject: rtl8xxxu: Pass RX rate to rx_parse_phystats and enable phystats for rtl8723bu rtl8xxxu_rx_parse_phystats() only needs the RX rate to determine whether to handle the stats as CCK or not. Parsing in the rate rather than the rx descriptor elimantes the need to handle multiple rx descriptor formats in the function. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4164b1a3dbd3..4e08f5e05c6e 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7216,13 +7216,13 @@ error: static void rtl8xxxu_rx_parse_phystats(struct rtl8xxxu_priv *priv, struct ieee80211_rx_status *rx_status, - struct rtl8xxxu_rx_desc *rx_desc, - struct rtl8723au_phy_stats *phy_stats) + struct rtl8723au_phy_stats *phy_stats, + u32 rxmcs) { if (phy_stats->sgi_en) rx_status->flag |= RX_FLAG_SHORT_GI; - if (rx_desc->rxmcs < DESC_RATE_6M) { + if (rxmcs < DESC_RATE_6M) { /* * Handle PHY stats for CCK rates */ @@ -7350,7 +7350,8 @@ static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv, skb_pull(skb, drvinfo_sz + desc_shift); if (rx_desc->phy_stats) - rtl8xxxu_rx_parse_phystats(priv, rx_status, rx_desc, phy_stats); + rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, + rx_desc->rxmcs); rx_status->mactime = le32_to_cpu(rx_desc->tsfl); rx_status->flag |= RX_FLAG_MACTIME_START; @@ -7395,6 +7396,10 @@ static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv, return RX_TYPE_C2H; } + if (rx_desc->phy_stats) + rtl8xxxu_rx_parse_phystats(priv, rx_status, phy_stats, + rx_desc->rxmcs); + rx_status->mactime = le32_to_cpu(rx_desc->tsfl); rx_status->flag |= RX_FLAG_MACTIME_START; -- cgit v1.2.3 From 6979494adf57afff0b6a5ad7ba77bdaef63274ad Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:43 -0500 Subject: rtl8xxxu: Remove unncessary semicolon This removes an superfluous semicolon. Reported-by: Fengguang Wu Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 4e08f5e05c6e..ce45c190979b 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -7195,7 +7195,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw, tx_desc->txdw3 |= cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B); } - }; + } rtl8xxxu_calc_tx_desc_csum(tx_desc); -- cgit v1.2.3 From 0290e7d0fd1e91d86289abbdd2d785c945e6bb2e Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:44 -0500 Subject: rtl8xxxu: convert rtl8723bu_init_bt() into rtl8723b_enable_rf() rtl8723bu_init_bt() is effectively the function enabling RF, so name it appropriately. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 11 +---------- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 - 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ce45c190979b..aae45090d42a 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -1596,10 +1596,6 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } -static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) -{ -} - static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv) { u8 sps0; @@ -5953,7 +5949,7 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv, } #endif -static void rtl8723bu_init_bt(struct rtl8xxxu_priv *priv) +static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) { struct h2c_cmd h2c; u32 val32; @@ -6508,10 +6504,6 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) else rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_T_METER, 0x60); - /* Init BT hw config. */ - if (priv->fops->init_bt) - priv->fops->init_bt(priv); - /* Set NAV_UPPER to 30000us */ val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT); rtl8xxxu_write8(priv, REG_NAV_UPPER, val8); @@ -8300,7 +8292,6 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, - .init_bt = rtl8723bu_init_bt, .parse_rx_desc = rtl8723bu_parse_rx_desc, .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index b44cb591bd36..776fd176e280 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1261,7 +1261,6 @@ struct rtl8xxxu_fileops { void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); void (*config_channel) (struct ieee80211_hw *hw); - void (*init_bt) (struct rtl8xxxu_priv *priv); int (*parse_rx_desc) (struct rtl8xxxu_priv *priv, struct sk_buff *skb, struct ieee80211_rx_status *rx_status); void (*init_aggregation) (struct rtl8xxxu_priv *priv); -- cgit v1.2.3 From 37f44dc79a385fed307cdaad6003fd3c0ca99c07 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:45 -0500 Subject: rtl8xxxu: Use define for REG_PWR_DATA bits Use the bit define rather than hard code the value for REG_PWR_DATA bits. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 2 +- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index aae45090d42a..f936e03e0761 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5996,7 +5996,7 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, 0x0067, val8); val32 = rtl8xxxu_read32(priv, REG_PWR_DATA); - val32 |= BIT(11); + val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN; rtl8xxxu_write32(priv, REG_PWR_DATA, val32); /* diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index a055362471e5..e0b9ea3811c0 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -135,7 +135,7 @@ #define EFUSE_ACCESS_DISABLE 0x00 /* RTL8723 only */ #define REG_PWR_DATA 0x0038 -#define PWR_DATA_EEPRPAD_RFE_CTRL_EN BIT(11) +#define PWR_DATA_EEPRPAD_RFE_CTRL_EN BIT(11) #define REG_CAL_TIMER 0x003c #define REG_ACLK_MON 0x003e -- cgit v1.2.3 From fc89a41fa686b41b5339b3eaaa9851fa4c5551a9 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:46 -0500 Subject: rtl8xxxu: Implement 8723bu specific disable_rf() function Powering up the 8723bu RF should probably be matched by the ability to power it down again. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 17 ++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index f936e03e0761..50d2c5c8c153 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -6056,6 +6056,17 @@ static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv) rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan)); } +static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv) +{ + u32 val32; + + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + + val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA); + val32 &= ~(BIT(22) | BIT(23)); + rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32); +} + static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv) { u32 agg_rx; @@ -7987,7 +7998,7 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw) if (priv->usb_interrupts) usb_kill_anchored_urbs(&priv->int_anchor); - rtl8723a_disable_rf(priv); + priv->fops->disable_rf(priv); /* * Disable interrupts @@ -8271,6 +8282,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8723au_parse_rx_desc, .enable_rf = rtl8723a_enable_rf, + .disable_rf = rtl8723a_disable_rf, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, .report_connect = rtl8723au_report_connect, @@ -8296,6 +8308,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .init_aggregation = rtl8723bu_init_aggregation, .init_statistics = rtl8723bu_init_statistics, .enable_rf = rtl8723b_enable_rf, + .disable_rf = rtl8723b_disable_rf, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8723bu_update_rate_mask, .report_connect = 
rtl8723bu_report_connect, @@ -8321,6 +8334,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .config_channel = rtl8723au_config_channel, .parse_rx_desc = rtl8723au_parse_rx_desc, .enable_rf = rtl8723a_enable_rf, + .disable_rf = rtl8723a_disable_rf, .set_tx_power = rtl8723a_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, .report_connect = rtl8723au_report_connect, @@ -8345,6 +8359,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .config_channel = rtl8723bu_config_channel, .parse_rx_desc = rtl8723bu_parse_rx_desc, .enable_rf = rtl8723b_enable_rf, + .disable_rf = rtl8723b_disable_rf, .set_tx_power = rtl8723b_set_tx_power, .update_rate_mask = rtl8723au_update_rate_mask, .report_connect = rtl8723au_report_connect, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 776fd176e280..9c535e8dbecf 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1266,6 +1266,7 @@ struct rtl8xxxu_fileops { void (*init_aggregation) (struct rtl8xxxu_priv *priv); void (*init_statistics) (struct rtl8xxxu_priv *priv); void (*enable_rf) (struct rtl8xxxu_priv *priv); + void (*disable_rf) (struct rtl8xxxu_priv *priv); void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel, bool ht40); void (*update_rate_mask) (struct rtl8xxxu_priv *priv, -- cgit v1.2.3 From fe37d5f644e5fb7179ff6464482eca9a4ebf555c Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:47 -0500 Subject: rtl8xxxu: Implement device specific power_off function Implment 8723bu specific device power down, and make power_off() a fileops function. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 98 +++++++++++++++++++++- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 + 3 files changed, 99 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 50d2c5c8c153..ef60e83ababd 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5240,6 +5240,64 @@ exit: return ret; } +static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + u32 val32; + int count, ret; + + /* Turn off RF */ + rtl8xxxu_write8(priv, REG_RF_CTRL, 0); + + /* Enable rising edge triggering interrupt */ + val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM); + val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ; + rtl8xxxu_write16(priv, REG_GPIO_INTM, val16); + + /* Release WLON reset 0x04[16]= 1*/ + val32 = rtl8xxxu_read32(priv, REG_GPIO_INTM); + val32 |= APS_FSMCO_WLON_RESET; + rtl8xxxu_write32(priv, REG_GPIO_INTM, val32); + + /* 0x0005[1] = 1 turn off MAC by HW state machine*/ + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + val8 |= BIT(1); + rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8); + + for (count = RTL8XXXU_MAX_REG_POLL; count; count--) { + val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1); + if ((val8 & BIT(1)) == 0) + break; + udelay(10); + } + + if (!count) { + dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n", + __func__); + ret = -EBUSY; + goto exit; + } + + /* Enable BT control XTAL setting */ + val8 = rtl8xxxu_read8(priv, REG_AFE_MISC); + val8 &= ~AFE_MISC_WL_XTAL_CTRL; + rtl8xxxu_write8(priv, REG_AFE_MISC, val8); + + /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */ + val8 = 
rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL); + val8 |= SYS_ISO_ANALOG_IPS; + rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8); + + /* 0x0020[0] = 0 disable LDOA12 MACRO block*/ + val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL); + val8 &= ~LDOA15_ENABLE; + rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8); + +exit: + return ret; +} + static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv) { u8 val8; @@ -5932,6 +5990,38 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e); } +static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 val16; + + /* + * Disable TX report timer + */ + val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); + val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE; + rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); + + rtl8xxxu_write16(priv, REG_CR, 0x0000); + + rtl8xxxu_active_to_lps(priv); + + /* Reset Firmware if running in RAM */ + if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL) + rtl8xxxu_firmware_self_reset(priv); + + /* Reset MCU */ + val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC); + val16 &= ~SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, val16); + + /* Reset MCU ready status */ + rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); + + rtl8723bu_active_to_emu(priv); + rtl8xxxu_emu_to_disabled(priv); +} + #ifdef NEED_PS_TDMA static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv, u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5) @@ -6152,7 +6242,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw) */ if (priv->rtlchip == 0x8723bu) { val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL); - val8 |= BIT(1); + val8 |= TX_REPORT_CTRL_TIMER_ENABLE; rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8); /* Set MAX RPT MACID */ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02); @@ -6545,7 +6635,7 @@ static void rtl8xxxu_disable_device(struct ieee80211_hw *hw) { struct rtl8xxxu_priv *priv = hw->priv; - rtl8xxxu_power_off(priv); + priv->fops->power_off(priv); } static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv, @@ -8277,6 +8367,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .parse_efuse = rtl8723au_parse_efuse, .load_firmware = rtl8723au_load_firmware, .power_on = rtl8723au_power_on, + .power_off = rtl8xxxu_power_off, .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, @@ -8300,6 +8391,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .parse_efuse = rtl8723bu_parse_efuse, .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723bu_power_on, + .power_off = rtl8723bu_power_off, .llt_init = rtl8xxxu_auto_llt_table, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, @@ -8329,6 +8421,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .parse_efuse = rtl8192cu_parse_efuse, .load_firmware = rtl8192cu_load_firmware, .power_on = rtl8192cu_power_on, + .power_off = rtl8xxxu_power_off, .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, @@ -8354,6 +8447,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .parse_efuse = rtl8192eu_parse_efuse, .load_firmware = rtl8192eu_load_firmware, .power_on = rtl8192eu_power_on, + .power_off = rtl8xxxu_power_off, .llt_init = rtl8xxxu_auto_llt_table, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h 
b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index 9c535e8dbecf..bfa9d6c5d502 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1257,6 +1257,7 @@ struct rtl8xxxu_fileops { int (*parse_efuse) (struct rtl8xxxu_priv *priv); int (*load_firmware) (struct rtl8xxxu_priv *priv); int (*power_on) (struct rtl8xxxu_priv *priv); + void (*power_off) (struct rtl8xxxu_priv *priv); int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index e0b9ea3811c0..a8d5c9f98051 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -512,6 +512,8 @@ #define REG_PKT_LOSE_RPT 0x04e1 #define REG_PTCL_ERR_STATUS 0x04e2 #define REG_TX_REPORT_CTRL 0x04ec +#define TX_REPORT_CTRL_TIMER_ENABLE BIT(1) + #define REG_TX_REPORT_TIME 0x04f0 #define REG_DUMMY 0x04fc -- cgit v1.2.3 From 430b454c5a3951ffaf117d4e613e9cea03f86a90 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:48 -0500 Subject: rtl8xxxu: Flush FIFO before powering down devices This should help when reloading the driver for 8723bu devices Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 34 ++++++++++++++++++++++ .../net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 2 ++ 2 files changed, 36 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index ef60e83ababd..e444e2b91f40 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5687,6 +5687,36 @@ static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv) return 0; } +static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv) +{ + u32 val32; + int retry, retval; + + rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff); + + val32 = rtl8xxxu_read32(priv, REG_RXPKT_NUM); + val32 |= RXPKT_NUM_RW_RELEASE_EN; + rtl8xxxu_write32(priv, REG_RXPKT_NUM, val32); + + retry = 100; + retval = -EBUSY; + + do { + val32 = rtl8xxxu_read32(priv, REG_RXPKT_NUM); + if (val32 & RXPKT_NUM_RXDMA_IDLE) { + retval = 0; + break; + } + } while (retry--); + + rtl8xxxu_write16(priv, REG_RQPN_NPQ, 0); + rtl8xxxu_write32(priv, REG_RQPN, 0x80000000); + mdelay(2); + pr_info("%s: retry %i\n", __func__, retry); + + return retval; +} + static int rtl8723au_power_on(struct rtl8xxxu_priv *priv) { u8 val8; @@ -5957,6 +5987,8 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32); } + rtl8xxxu_flush_fifo(priv); + rtl8xxxu_active_to_lps(priv); /* Turn off RF */ @@ -5995,6 +6027,8 @@ static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv) u8 val8; u16 val16; + rtl8xxxu_flush_fifo(priv); + /* * Disable TX report timer */ diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h index a8d5c9f98051..e545e849f5a3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h @@ -404,6 +404,8 @@ #define REG_RXDMA_AGG_PG_TH 0x0280 #define RXDMA_USB_AGG_ENABLE BIT(31) #define REG_RXPKT_NUM 0x0284 +#define RXPKT_NUM_RXDMA_IDLE BIT(17) +#define 
RXPKT_NUM_RW_RELEASE_EN BIT(18) #define REG_RXDMA_STATUS 0x0288 /* Presumably only found on newer chips such as 8723bu */ -- cgit v1.2.3 From 145428ec7c6794c3ac78ddce2b073076c9c870d2 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:49 -0500 Subject: rtl8xxxu: Print a warning if flushing the FIFO fails Only print a warning if the FIFO flush fails, as opposed to printing the status unconditionally. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index e444e2b91f40..5c08db745d0f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -5689,6 +5689,7 @@ static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv) static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv) { + struct device *dev = &priv->udev->dev; u32 val32; int retry, retval; @@ -5712,7 +5713,9 @@ static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv) rtl8xxxu_write16(priv, REG_RQPN_NPQ, 0); rtl8xxxu_write32(priv, REG_RQPN, 0x80000000); mdelay(2); - pr_info("%s: retry %i\n", __func__, retry); + + if (!retry) + dev_warn(dev, "Failed to flush FIFO\n"); return retval; } -- cgit v1.2.3 From 7d4ccb8bae85550035328bddd20197614dfe415a Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:50 -0500 Subject: rtl8xxxu: Use correct 8051 reset function for 8723b parts 8723b needs more action, so implement support for device specific reset functions. Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 40 ++++++++++++++++++++++-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 1 + 2 files changed, 39 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 5c08db745d0f..81777c430312 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2720,12 +2720,44 @@ static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv) val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); sys_func &= ~SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); val8 |= BIT(0); rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + + sys_func |= SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); +} + +static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv) +{ + u8 val8; + u16 sys_func; + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); + val8 &= ~BIT(1); + rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 &= ~BIT(0); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + + sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC); + sys_func &= ~SYS_FUNC_CPU_ENABLE; + rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL); + val8 &= ~BIT(1); + rtl8xxxu_write8(priv, REG_RSV_CTRL, val8); + + val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1); + val8 |= BIT(0); + rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8); + sys_func |= SYS_FUNC_CPU_ENABLE; rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func); } @@ -2758,7 +2790,7 @@ static int 
rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv) * Reset the 8051 in order for the firmware to start running, * otherwise it won't come up on the 8192eu */ - rtl8xxxu_reset_8051(priv); + priv->fops->reset_8051(priv); /* Wait for firmware to become ready */ for (i = 0; i < RTL8XXXU_FIRMWARE_POLL_MAX; i++) { @@ -2805,7 +2837,7 @@ static int rtl8xxxu_download_firmware(struct rtl8xxxu_priv *priv) if (val8 & MCU_FW_RAM_SEL) { pr_info("do the RAM reset\n"); rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00); - rtl8xxxu_reset_8051(priv); + priv->fops->reset_8051(priv); } /* MCU firmware download enable */ @@ -8405,6 +8437,7 @@ static struct rtl8xxxu_fileops rtl8723au_fops = { .load_firmware = rtl8723au_load_firmware, .power_on = rtl8723au_power_on, .power_off = rtl8xxxu_power_off, + .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, @@ -8429,6 +8462,7 @@ static struct rtl8xxxu_fileops rtl8723bu_fops = { .load_firmware = rtl8723bu_load_firmware, .power_on = rtl8723bu_power_on, .power_off = rtl8723bu_power_off, + .reset_8051 = rtl8723bu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, @@ -8459,6 +8493,7 @@ static struct rtl8xxxu_fileops rtl8192cu_fops = { .load_firmware = rtl8192cu_load_firmware, .power_on = rtl8192cu_power_on, .power_off = rtl8xxxu_power_off, + .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_init_llt_table, .phy_iq_calibrate = rtl8723au_phy_iq_calibrate, .config_channel = rtl8723au_config_channel, @@ -8485,6 +8520,7 @@ static struct rtl8xxxu_fileops rtl8192eu_fops = { .load_firmware = rtl8192eu_load_firmware, .power_on = rtl8192eu_power_on, .power_off = rtl8xxxu_power_off, + .reset_8051 = rtl8xxxu_reset_8051, .llt_init = rtl8xxxu_auto_llt_table, .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate, .config_channel = rtl8723bu_config_channel, diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index bfa9d6c5d502..7b73654e1368 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1258,6 +1258,7 @@ struct rtl8xxxu_fileops { int (*load_firmware) (struct rtl8xxxu_priv *priv); int (*power_on) (struct rtl8xxxu_priv *priv); void (*power_off) (struct rtl8xxxu_priv *priv); + void (*reset_8051) (struct rtl8xxxu_priv *priv); int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page); void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv); void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv); -- cgit v1.2.3 From ccfe1e85322090649d2fae599e55300c1512bf15 Mon Sep 17 00:00:00 2001 From: Jes Sorensen Date: Mon, 29 Feb 2016 17:05:51 -0500 Subject: rtl8xxxu: Temporarily disable 8192eu device init To reduce the patch volume, temporariliy disable 8192eu device init. 
Signed-off-by: Jes Sorensen Signed-off-by: Kalle Valo --- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c index 81777c430312..abdff458b80f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c @@ -2562,6 +2562,10 @@ static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv) raw[i + 6], raw[i + 7]); } } + /* + * Temporarily disable 8192eu support + */ + return -EINVAL; return 0; } -- cgit v1.2.3 From c5fd9cbb9dc40e0d273d6731030e65096adceeda Mon Sep 17 00:00:00 2001 From: Jérôme de Bretagne Date: Thu, 3 Mar 2016 01:46:28 +0100 Subject: Bluetooth: hci_bcm: Add BCM2E55 ACPI ID used in Lenovo ThinkPad Tablet 8 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lenovo ThinkPad Tablet 8 with BCM43241 rev B5 chipset uses the BCM2E55 ACPI ID for Bluetooth. Add it to the list of supported devices. Signed-off-by: Jérôme de Bretagne Signed-off-by: Marcel Holtmann --- drivers/bluetooth/hci_bcm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index bb4c5a00aea0..d8881dc0600c 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -821,6 +821,7 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E3F", 0 }, { "BCM2E40", 0 }, { "BCM2E54", 0 }, + { "BCM2E55", 0 }, { "BCM2E64", 0 }, { "BCM2E65", 0 }, { "BCM2E67", 0 }, -- cgit v1.2.3 From 75c6aca4765dbe3d0c1507ab5052f2e373dc2331 Mon Sep 17 00:00:00 2001 From: Dmitry Tunin Date: Fri, 4 Mar 2016 01:32:19 +0300 Subject: Bluetooth: btusb: Add a new AR3012 ID 13d3:3472 T: Bus=01 Lev=01 Prnt=01 Port=04 Cnt=01 Dev#= 4 Spd=12 MxCh= 0 D: Ver= 1.10 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=13d3 ProdID=3472 Rev=00.01 C: #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr=100mA I: If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb I: If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb BugLink: https://bugs.launchpad.net/bugs/1552925 Signed-off-by: Dmitry Tunin Signed-off-by: Marcel Holtmann Cc: stable@vger.kernel.org --- drivers/bluetooth/ath3k.c | 2 ++ drivers/bluetooth/btusb.c | 1 + 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 93747389dd28..47ca4b39d306 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -120,6 +120,7 @@ static const struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3408) }, { USB_DEVICE(0x13d3, 0x3423) }, { USB_DEVICE(0x13d3, 0x3432) }, + { USB_DEVICE(0x13d3, 0x3472) }, { USB_DEVICE(0x13d3, 0x3474) }, /* Atheros AR5BBU12 with sflash firmware */ @@ -185,6 +186,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 97f3bba93a8e..0d4e372e426d 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -234,6 +234,7 @@ static const struct usb_device_id blacklist_table[] = { { 
USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ -- cgit v1.2.3 From 9a01242dc7fc4d5fe3f722afbf35b33aa414cd2f Mon Sep 17 00:00:00 2001 From: Wei-Ning Huang Date: Tue, 8 Mar 2016 11:40:06 +0800 Subject: Bluetooth: btmrvl_sdio: fix firmware activation failure In some cases, the btmrvl_sdio firmware would fail to activate within the polling time. Increase the polling interval to 100 msec to fix the issue. Signed-off-by: Wei-Ning Huang Signed-off-by: Wei-Ning Huang Signed-off-by: Marcel Holtmann --- drivers/bluetooth/btmrvl_sdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index 6ed8acfcfa9c..c6ef248de5e4 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -371,7 +371,7 @@ static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card, if (firmwarestat == FIRMWARE_READY) return 0; - msleep(10); + msleep(100); } return -ETIMEDOUT; -- cgit v1.2.3 From c1b7fca65070bfadca94dd53a4e6b71cd4f69715 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 8 Mar 2016 01:36:28 +0300 Subject: sh_eth: fix NULL pointer dereference in sh_eth_ring_format() In a low memory situation, if netdev_alloc_skb() fails on the first RX ring loop iteration in sh_eth_ring_format(), 'rxdesc' is still NULL. Avoid kernel oops by adding the 'rxdesc' check after the loop. Reported-by: Wolfram Sang Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/sh_eth.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 9c6448915b65..6fb75a1d68b9 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1154,7 +1154,8 @@ static void sh_eth_ring_format(struct net_device *ndev) mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); /* Mark the last entry as wrapping the ring. */ - rxdesc->status |= cpu_to_le32(RD_RDLE); + if (rxdesc) + rxdesc->status |= cpu_to_le32(RD_RDLE); memset(mdp->tx_ring, 0, tx_ringsize); -- cgit v1.2.3 From d0ba913488dc8c55d1880f5ed34f096dc45fb05d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 8 Mar 2016 01:37:09 +0300 Subject: sh_eth: advance 'rxdesc' later in sh_eth_ring_format() Iff dma_map_single() fails, 'rxdesc' should point to the last filled RX descriptor, so that it can be marked as the last one, however the driver would have already advanced it by that time. In order to fix that, only fill an RX descriptor once all the data for it is ready. Signed-off-by: Sergei Shtylyov Signed-off-by: David S.
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 6fb75a1d68b9..2e9a78164054 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1127,11 +1127,8 @@ static void sh_eth_ring_format(struct net_device *ndev) break; sh_eth_set_receive_align(skb); - /* RX descriptor */ - rxdesc = &mdp->rx_ring[i]; /* The size of the buffer is a multiple of 32 bytes. */ buf_len = ALIGN(mdp->rx_buf_sz, 32); - rxdesc->len = cpu_to_le32(buf_len << 16); dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len, DMA_FROM_DEVICE); if (dma_mapping_error(&ndev->dev, dma_addr)) { @@ -1139,6 +1136,10 @@ static void sh_eth_ring_format(struct net_device *ndev) break; } mdp->rx_skbuff[i] = skb; + + /* RX descriptor */ + rxdesc = &mdp->rx_ring[i]; + rxdesc->len = cpu_to_le32(buf_len << 16); rxdesc->addr = cpu_to_le32(dma_addr); rxdesc->status = cpu_to_le32(RD_RACT | RD_RFP); -- cgit v1.2.3 From 2d9deae4aedee2be2205e22440ac357c37013658 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 7 Mar 2016 18:24:17 -0500 Subject: net: dsa: mv88e6xxx: rework port state setter Apply a few non-functional changes on the port state setter: * add a dynamic debug message with state names to track changes * explicit states checking instead of assuming their numeric values * lock mutex only once when changing several port states * use bitmap macros to declare and access port_state_update_mask Signed-off-by: Vivien Didelot Tested-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx.c | 54 +++++++++++++++++++++++++++------------------ drivers/net/dsa/mv88e6xxx.h | 2 +- 2 files changed, 34 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index d11c9d58cf10..3a58a8afe537 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1051,39 +1051,49 @@ static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port, return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too); } -static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state) +static const char * const mv88e6xxx_port_state_names[] = { + [PORT_CONTROL_STATE_DISABLED] = "Disabled", + [PORT_CONTROL_STATE_BLOCKING] = "Blocking/Listening", + [PORT_CONTROL_STATE_LEARNING] = "Learning", + [PORT_CONTROL_STATE_FORWARDING] = "Forwarding", +}; + +static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state) { - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); int reg, ret = 0; u8 oldstate; - mutex_lock(&ps->smi_mutex); - reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL); - if (reg < 0) { - ret = reg; - goto abort; - } + if (reg < 0) + return reg; oldstate = reg & PORT_CONTROL_STATE_MASK; + if (oldstate != state) { /* Flush forwarding database if we're moving a port * from Learning or Forwarding state to Disabled or * Blocking or Listening state. 
*/ - if (oldstate >= PORT_CONTROL_STATE_LEARNING && - state <= PORT_CONTROL_STATE_BLOCKING) { + if ((oldstate == PORT_CONTROL_STATE_LEARNING || + oldstate == PORT_CONTROL_STATE_FORWARDING) + && (state == PORT_CONTROL_STATE_DISABLED || + state == PORT_CONTROL_STATE_BLOCKING)) { ret = _mv88e6xxx_atu_remove(ds, 0, port, false); if (ret) - goto abort; + return ret; } + reg = (reg & ~PORT_CONTROL_STATE_MASK) | state; ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL, reg); + if (ret) + return ret; + + netdev_dbg(ds->ports[port], "PortState %s (was %s)\n", + mv88e6xxx_port_state_names[state], + mv88e6xxx_port_state_names[oldstate]); } -abort: - mutex_unlock(&ps->smi_mutex); return ret; } @@ -1146,13 +1156,11 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) break; } - netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state); - /* mv88e6xxx_port_stp_update may be called with softirqs disabled, * so we can not update the port state directly but need to schedule it. */ ps->ports[port].state = stp_state; - set_bit(port, &ps->port_state_update_mask); + set_bit(port, ps->port_state_update_mask); schedule_work(&ps->bridge_work); return 0; @@ -2228,11 +2236,15 @@ static void mv88e6xxx_bridge_work(struct work_struct *work) ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work); ds = ((struct dsa_switch *)ps) - 1; - while (ps->port_state_update_mask) { - port = __ffs(ps->port_state_update_mask); - clear_bit(port, &ps->port_state_update_mask); - mv88e6xxx_set_port_state(ds, port, ps->ports[port].state); - } + mutex_lock(&ps->smi_mutex); + + for (port = 0; port < ps->num_ports; ++port) + if (test_and_clear_bit(port, ps->port_state_update_mask) && + _mv88e6xxx_port_state(ds, port, ps->ports[port].state)) + netdev_warn(ds->ports[port], "failed to update state to %s\n", + mv88e6xxx_port_state_names[ps->ports[port].state]); + + mutex_unlock(&ps->smi_mutex); } static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index d7b088dd8e16..3425616987ed 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -426,7 +426,7 @@ struct mv88e6xxx_priv_state { struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS]; - unsigned long port_state_update_mask; + DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS); struct work_struct bridge_work; }; -- cgit v1.2.3 From 5da96031834a65e064b97c8d9f7df958c818a4cc Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 7 Mar 2016 18:24:39 -0500 Subject: net: dsa: mv88e6xxx: read then write PVID The port register 0x07 contains more options than just the default VID, even though they are not used yet. So prefer a read then write operation over a direct write. This also allows to keep track of the change through dynamic debug. Signed-off-by: Vivien Didelot Tested-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 3a58a8afe537..1aee42d1c5f2 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1166,23 +1166,45 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state) return 0; } -static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) +static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new, + u16 *old) { + u16 pvid; int ret; ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN); if (ret < 0) return ret; - *pvid = ret & PORT_DEFAULT_VLAN_MASK; + pvid = ret & PORT_DEFAULT_VLAN_MASK; + + if (new) { + ret &= ~PORT_DEFAULT_VLAN_MASK; + ret |= *new & PORT_DEFAULT_VLAN_MASK; + + ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), + PORT_DEFAULT_VLAN, ret); + if (ret < 0) + return ret; + + netdev_dbg(ds->ports[port], "DefaultVID %d (was %d)\n", *new, + pvid); + } + + if (old) + *old = pvid; return 0; } +static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid) +{ + return _mv88e6xxx_port_pvid(ds, port, NULL, pvid); +} + static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid) { - return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN, - pvid & PORT_DEFAULT_VLAN_MASK); + return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL); } static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds) -- cgit v1.2.3 From 5220ef1e39577fbe20989e97eac708d8896966f3 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Mon, 7 Mar 2016 18:24:52 -0500 Subject: net: dsa: mv88e6xxx: avoid writing the same mode There is no need to change the 802.1Q port mode for the same value. Thus avoid such message: [ 401.954836] dsa dsa@0 lan0: 802.1Q Mode: Disabled (was Disabled) Signed-off-by: Vivien Didelot Tested-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 1aee42d1c5f2..5f07524083c3 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1765,16 +1765,21 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, old = ret & PORT_CONTROL_2_8021Q_MASK; - ret &= ~PORT_CONTROL_2_8021Q_MASK; - ret |= new & PORT_CONTROL_2_8021Q_MASK; + if (new != old) { + ret &= ~PORT_CONTROL_2_8021Q_MASK; + ret |= new & PORT_CONTROL_2_8021Q_MASK; - ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, ret); - if (ret < 0) - goto unlock; + ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2, + ret); + if (ret < 0) + goto unlock; + + netdev_dbg(ds->ports[port], "802.1Q Mode %s (was %s)\n", + mv88e6xxx_port_8021q_mode_names[new], + mv88e6xxx_port_8021q_mode_names[old]); + } - netdev_dbg(ds->ports[port], "802.1Q Mode: %s (was %s)\n", - mv88e6xxx_port_8021q_mode_names[new], - mv88e6xxx_port_8021q_mode_names[old]); + ret = 0; unlock: mutex_unlock(&ps->smi_mutex); -- cgit v1.2.3 From 84f670189b8c0dfd3dbaf12da8c946225f4011d3 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 8 Mar 2016 10:50:17 +0530 Subject: cxgb4vf: Enable interrupts before we register our network devices This avoids a race condition on systems where network devices are set up to be configured automatically: the first Port Link Status message from the firmware can arrive on the Asynchronous Firmware Event Queue before we've enabled interrupts. If that happens, we end up losing the interrupt and never realizing that the link has actually come up. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 51 +++++++++++----------- 1 file changed, 26 insertions(+), 25 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 91857b81009e..fcafe340f625 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2771,6 +2771,24 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } } + /* See what interrupts we'll be using. If we've been configured to + * use MSI-X interrupts, try to enable them but fall back to using + * MSI interrupts if we can't enable MSI-X interrupts. If we can't + * get MSI interrupts we bail with the error. + */ + if (msi == MSI_MSIX && enable_msix(adapter) == 0) + adapter->flags |= USING_MSIX; + else { + err = pci_enable_msi(pdev); + if (err) { + dev_err(&pdev->dev, "Unable to allocate %s interrupts;" + " err=%d\n", + msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); + goto err_free_dev; + } + adapter->flags |= USING_MSI; + } + /* * The "card" is now ready to go. If any errors occur during device * registration we do not fail the whole "card" but rather proceed @@ -2793,7 +2811,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, } if (adapter->registered_device_map == 0) { dev_err(&pdev->dev, "could not register any net devices\n"); - goto err_free_dev; + goto err_disable_interrupts; } /* @@ -2810,25 +2828,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, setup_debugfs(adapter); } - /* - * See what interrupts we'll be using. If we've been configured to - * use MSI-X interrupts, try to enable them but fall back to using - * MSI interrupts if we can't enable MSI-X interrupts.
If we can't - * get MSI interrupts we bail with the error. - */ - if (msi == MSI_MSIX && enable_msix(adapter) == 0) - adapter->flags |= USING_MSIX; - else { - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "Unable to allocate %s interrupts;" - " err=%d\n", - msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); - goto err_free_debugfs; - } - adapter->flags |= USING_MSI; - } - /* * Now that we know how many "ports" we have and what their types are, * and how many Queue Sets we can support, we can configure our queue @@ -2856,11 +2855,13 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Error recovery and exit code. Unwind state that's been created * so far and return the error. */ - -err_free_debugfs: - if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { - cleanup_debugfs(adapter); - debugfs_remove_recursive(adapter->debugfs_root); +err_disable_interrupts: + if (adapter->flags & USING_MSIX) { + pci_disable_msix(adapter->pdev); + adapter->flags &= ~USING_MSIX; + } else if (adapter->flags & USING_MSI) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~USING_MSI; } err_free_dev: -- cgit v1.2.3 From 495c22bbb2b03548f6aa870faa7f6b601cb41c85 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 8 Mar 2016 10:50:18 +0530 Subject: cxgb4vf: Configure queue based on resource and interrupt type The Queue Set Configuration code was always reserving room for a Forwarded interrupt Queue even in the cases where we weren't using it. Figure out how many Ports and Queue Sets we can support. This depends on knowing our Virtual Function Resources and may be called a second time if we fall back from MSI-X to MSI Interrupt Mode. This change fixes that problem. Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- .../net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 165 ++++++++++++--------- 1 file changed, 94 insertions(+), 71 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index fcafe340f625..17a31531b26e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2176,6 +2176,73 @@ static void cleanup_debugfs(struct adapter *adapter) /* nothing to do */ } +/* Figure out how many Ports and Queue Sets we can support. This depends on + * knowing our Virtual Function Resources and may be called a second time if + * we fall back from MSI-X to MSI Interrupt Mode. + */ +static void size_nports_qsets(struct adapter *adapter) +{ + struct vf_resources *vfres = &adapter->params.vfres; + unsigned int ethqsets, pmask_nports; + + /* The number of "ports" which we support is equal to the number of + * Virtual Interfaces with which we've been provisioned. + */ + adapter->params.nports = vfres->nvi; + if (adapter->params.nports > MAX_NPORTS) { + dev_warn(adapter->pdev_dev, "only using %d of %d maximum" + " allowed virtual interfaces\n", MAX_NPORTS, + adapter->params.nports); + adapter->params.nports = MAX_NPORTS; + } + + /* We may have been provisioned with more VIs than the number of + * ports we're allowed to access (our Port Access Rights Mask). + * This is obviously a configuration conflict but we don't want to + * crash the kernel or anything silly just because of that. 
+ */ + pmask_nports = hweight32(adapter->params.vfres.pmask); + if (pmask_nports < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d provissioned" + " virtual interfaces; limited by Port Access Rights" + " mask %#x\n", pmask_nports, adapter->params.nports, + adapter->params.vfres.pmask); + adapter->params.nports = pmask_nports; + } + + /* We need to reserve an Ingress Queue for the Asynchronous Firmware + * Event Queue. And if we're using MSI Interrupts, we'll also need to + * reserve an Ingress Queue for a Forwarded Interrupts. + * + * The rest of the FL/Intr-capable ingress queues will be matched up + * one-for-one with Ethernet/Control egress queues in order to form + * "Queue Sets" which will be aportioned between the "ports". For + * each Queue Set, we'll need the ability to allocate two Egress + * Contexts -- one for the Ingress Queue Free List and one for the TX + * Ethernet Queue. + * + * Note that even if we're currently configured to use MSI-X + * Interrupts (module variable msi == MSI_MSIX) we may get downgraded + * to MSI Interrupts if we can't get enough MSI-X Interrupts. If that + * happens we'll need to adjust things later. + */ + ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI); + if (vfres->nethctrl != ethqsets) + ethqsets = min(vfres->nethctrl, ethqsets); + if (vfres->neq < ethqsets*2) + ethqsets = vfres->neq/2; + if (ethqsets > MAX_ETH_QSETS) + ethqsets = MAX_ETH_QSETS; + adapter->sge.max_ethqsets = ethqsets; + + if (adapter->sge.max_ethqsets < adapter->params.nports) { + dev_warn(adapter->pdev_dev, "only using %d of %d available" + " virtual interfaces (too few Queue Sets)\n", + adapter->sge.max_ethqsets, adapter->params.nports); + adapter->params.nports = adapter->sge.max_ethqsets; + } +} + /* * Perform early "adapter" initialization. This is where we discover what * adapter parameters we're going to be using and initialize basic adapter @@ -2183,10 +2250,8 @@ static void cleanup_debugfs(struct adapter *adapter) */ static int adap_init0(struct adapter *adapter) { - struct vf_resources *vfres = &adapter->params.vfres; struct sge_params *sge_params = &adapter->params.sge; struct sge *s = &adapter->sge; - unsigned int ethqsets; int err; u32 param, val = 0; @@ -2295,69 +2360,18 @@ static int adap_init0(struct adapter *adapter) return err; } - /* - * The number of "ports" which we support is equal to the number of - * Virtual Interfaces with which we've been provisioned. - */ - adapter->params.nports = vfres->nvi; - if (adapter->params.nports > MAX_NPORTS) { - dev_warn(adapter->pdev_dev, "only using %d of %d allowed" - " virtual interfaces\n", MAX_NPORTS, - adapter->params.nports); - adapter->params.nports = MAX_NPORTS; - } - - /* - * We need to reserve a number of the ingress queues with Free List - * and Interrupt capabilities for special interrupt purposes (like - * asynchronous firmware messages, or forwarded interrupts if we're - * using MSI). The rest of the FL/Intr-capable ingress queues will be - * matched up one-for-one with Ethernet/Control egress queues in order - * to form "Queue Sets" which will be aportioned between the "ports". - * For each Queue Set, we'll need the ability to allocate two Egress - * Contexts -- one for the Ingress Queue Free List and one for the TX - * Ethernet Queue. 
- */ - ethqsets = vfres->niqflint - INGQ_EXTRAS; - if (vfres->nethctrl != ethqsets) { - dev_warn(adapter->pdev_dev, "unequal number of [available]" - " ingress/egress queues (%d/%d); using minimum for" - " number of Queue Sets\n", ethqsets, vfres->nethctrl); - ethqsets = min(vfres->nethctrl, ethqsets); - } - if (vfres->neq < ethqsets*2) { - dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)" - " to support Queue Sets (%d); reducing allowed Queue" - " Sets\n", vfres->neq, ethqsets); - ethqsets = vfres->neq/2; - } - if (ethqsets > MAX_ETH_QSETS) { - dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue" - " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets); - ethqsets = MAX_ETH_QSETS; - } - if (vfres->niq != 0 || vfres->neq > ethqsets*2) { - dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)" - " ignored\n", vfres->niq, vfres->neq - ethqsets*2); - } - adapter->sge.max_ethqsets = ethqsets; - - /* - * Check for various parameter sanity issues. Most checks simply - * result in us using fewer resources than our provissioning but we - * do need at least one "port" with which to work ... - */ - if (adapter->sge.max_ethqsets < adapter->params.nports) { - dev_warn(adapter->pdev_dev, "only using %d of %d available" - " virtual interfaces (too few Queue Sets)\n", - adapter->sge.max_ethqsets, adapter->params.nports); - adapter->params.nports = adapter->sge.max_ethqsets; - } - if (adapter->params.nports == 0) { + /* Check for various parameter sanity issues */ + if (adapter->params.vfres.nvi == 0) { dev_err(adapter->pdev_dev, "no virtual interfaces configured/" "usable!\n"); return -EINVAL; } + + /* Initialize nports and max_ethqsets now that we have our Virtual + * Function Resources. + */ + size_nports_qsets(adapter); + return 0; } @@ -2779,16 +2793,32 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, if (msi == MSI_MSIX && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; else { + if (msi == MSI_MSIX) { + dev_info(adapter->pdev_dev, + "Unable to use MSI-X Interrupts; falling " + "back to MSI Interrupts\n"); + + /* We're going to need a Forwarded Interrupt Queue so + * that may cut into how many Queue Sets we can + * support. + */ + msi = MSI_MSI; + size_nports_qsets(adapter); + } err = pci_enable_msi(pdev); if (err) { - dev_err(&pdev->dev, "Unable to allocate %s interrupts;" - " err=%d\n", - msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err); + dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;" + " err=%d\n", err); goto err_free_dev; } adapter->flags |= USING_MSI; } + /* Now that we know how many "ports" we have and what interrupt + * mechanism we're going to use, we can configure our queue resources. + */ + cfg_queues(adapter); + /* * The "card" is now ready to go. If any errors occur during device * registration we do not fail the whole "card" but rather proceed @@ -2828,13 +2858,6 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, setup_debugfs(adapter); } - /* - * Now that we know how many "ports" we have and what their types are, - * and how many Queue Sets we can support, we can configure our queue - * resources. - */ - cfg_queues(adapter); - /* * Print a short notice on the existence and configuration of the new * VF network device ... -- cgit v1.2.3 From 28f71c6df4c5399e4f30ce37c1fff0f6af3d364d Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 8 Mar 2016 10:50:19 +0530 Subject: cxgb4vf: Add a couple more checks for invalid provisioning configurations Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller --- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 17a31531b26e..5a3b8836847d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2361,6 +2361,11 @@ static int adap_init0(struct adapter *adapter) } /* Check for various parameter sanity issues */ + if (adapter->params.vfres.pmask == 0) { + dev_err(adapter->pdev_dev, "no port access configured\n" + "usable!\n"); + return -EINVAL; + } if (adapter->params.vfres.nvi == 0) { dev_err(adapter->pdev_dev, "no virtual interfaces configured/" "usable!\n"); -- cgit v1.2.3 From a8d16d08065f8d3f2fca4a4c377fc4bfc5bdfccd Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Tue, 8 Mar 2016 10:50:20 +0530 Subject: cxgb4vf: Set number of queues in pci probe only Signed-off-by: Hariprasad Shenai Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 5a3b8836847d..1cc8a7a69457 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -790,10 +790,6 @@ static int cxgb4vf_open(struct net_device *dev) /* * Note that this interface is up and start everything up ... */ - netif_set_real_num_tx_queues(dev, pi->nqsets); - err = netif_set_real_num_rx_queues(dev, pi->nqsets); - if (err) - goto err_unwind; err = link_start(dev); if (err) goto err_unwind; @@ -2831,10 +2827,14 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * must register at least one net device. */ for_each_port(adapter, pidx) { + struct port_info *pi = netdev_priv(adapter->port[pidx]); netdev = adapter->port[pidx]; if (netdev == NULL) continue; + netif_set_real_num_tx_queues(netdev, pi->nqsets); + netif_set_real_num_rx_queues(netdev, pi->nqsets); + err = register_netdev(netdev); if (err) { dev_warn(&pdev->dev, "cannot register net device %s," -- cgit v1.2.3 From 5bf93251cee1fb66141d1d2eaff86e04a9397bdf Mon Sep 17 00:00:00 2001 From: Rajesh Borundia Date: Tue, 8 Mar 2016 02:39:57 -0500 Subject: qlcnic: Remove unnecessary usage of atomic_t o atomic_t usage is incorrect as we are not implementing any atomicity. Signed-off-by: Rajesh Borundia Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2 +- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 46bbea8e023c..d18667b1b5b7 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -1099,7 +1099,7 @@ struct qlcnic_mailbox { unsigned long status; spinlock_t queue_lock; /* Mailbox queue lock */ spinlock_t aen_lock; /* Mailbox response/AEN lock */ - atomic_t rsp_status; + u32 rsp_status; u32 num_cmds; }; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 37a731be7d39..e3d1bb722903 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -491,7 +491,7 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter) static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx) { - atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED); + mbx->rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; complete(&mbx->completion); } @@ -510,7 +510,7 @@ static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter) if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { - if (atomic_read(&mbx->rsp_status) != rsp_status) + if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); } out: @@ -1023,7 +1023,7 @@ static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter) if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); } else { - if (atomic_read(&mbx->rsp_status) != rsp_status) + if (mbx->rsp_status != rsp_status) qlcnic_83xx_notify_mbx_response(mbx); } } @@ -4050,7 +4050,6 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) struct qlcnic_adapter *adapter = mbx->adapter; const struct qlcnic_mbx_ops *mbx_ops = mbx->ops; struct device *dev = &adapter->pdev->dev; - atomic_t *rsp_status = &mbx->rsp_status; struct list_head *head = &mbx->cmd_q; struct qlcnic_hardware_context *ahw; struct qlcnic_cmd_args *cmd = NULL; @@ -4063,7 +4062,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) return; } - atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT); + mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; spin_lock(&mbx->queue_lock); -- cgit v1.2.3 From 819bfe764dceec2f6b4551768453f374b4c60443 Mon Sep 17 00:00:00 2001 From: Rajesh Borundia Date: Tue, 8 Mar 2016 02:39:58 -0500 Subject: qlcnic: Fix mailbox completion handling during spurious interrupt o While the driver is in the middle of a MB completion processing and it receives a spurious MB interrupt, it is mistaken as a good MB completion interrupt leading to premature completion of the next MB request. Fix the driver to guard against this by checking the current state of MB processing and ignore the spurious interrupt. Also added a stats counter to record this condition. Signed-off-by: Rajesh Borundia Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 1 + drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 15 +++++++++++---- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 3 ++- 3 files changed, 14 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index d18667b1b5b7..55007f1e6bbc 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -566,6 +566,7 @@ struct qlcnic_adapter_stats { u64 tx_dma_map_error; u64 spurious_intr; u64 mac_filter_limit_overrun; + u64 mbx_spurious_intr; }; /* diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index e3d1bb722903..f9640d5ce6ba 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -2338,9 +2338,9 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter, static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) { + u32 mask, resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED; struct qlcnic_adapter *adapter = data; struct qlcnic_mailbox *mbx; - u32 mask, resp, event; unsigned long flags; mbx = adapter->ahw->mailbox; @@ -2350,10 +2350,14 @@ static irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data) goto out; event = readl(QLCNIC_MBX_FW(adapter->ahw, 0)); - if (event & QLCNIC_MBX_ASYNC_EVENT) + if (event & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - else - qlcnic_83xx_notify_mbx_response(mbx); + } else { + if (mbx->rsp_status != rsp_status) + qlcnic_83xx_notify_mbx_response(mbx); + else + adapter->stats.mbx_spurious_intr++; + } out: mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK); @@ -4053,6 +4057,7 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) struct list_head *head = &mbx->cmd_q; struct qlcnic_hardware_context *ahw; struct qlcnic_cmd_args *cmd = NULL; + unsigned long flags; ahw = adapter->ahw; @@ -4062,7 +4067,9 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work) return; } + spin_lock_irqsave(&mbx->aen_lock, flags); mbx->rsp_status = QLC_83XX_MBX_RESPONSE_WAIT; + spin_unlock_irqrestore(&mbx->aen_lock, flags); spin_lock(&mbx->queue_lock); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 494e8105adee..0a2318cad34d 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -59,7 +59,8 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = { QLC_OFF(stats.mac_filter_limit_overrun)}, {"spurious intr", QLC_SIZEOF(stats.spurious_intr), QLC_OFF(stats.spurious_intr)}, - + {"mbx spurious intr", QLC_SIZEOF(stats.mbx_spurious_intr), + QLC_OFF(stats.mbx_spurious_intr)}, }; static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = { -- cgit v1.2.3 From 88f09bd5b9875a0fbf2075221590d9c4418cdbbc Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Tue, 8 Mar 2016 04:09:44 -0500 Subject: qede: Fix net-next "make ARCH=x86_64" 'commit 55482edc25f0606851de42e73618f813f310d009 ("qede: Add slowpath/fastpath support and enable hardware GRO")' introduces below error when compiling net-next with "make ARCH=x86_64" drivers/built-in.o: In function `qede_rx_int': qede_main.c:(.text+0x6101a0): undefined reference to `tcp_gro_complete' Signed-off-by: Manish Chopra Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qlogic/qede/qede_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 572862564ab6..518af329502d 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1001,6 +1001,7 @@ static void qede_tpa_start(struct qede_dev *edev, } } +#ifdef CONFIG_INET static void qede_gro_ip_csum(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); @@ -1029,12 +1030,14 @@ static void qede_gro_ipv6_csum(struct sk_buff *skb) &iph->saddr, &iph->daddr, 0); tcp_gro_complete(skb); } +#endif static void qede_gro_receive(struct qede_dev *edev, struct qede_fastpath *fp, struct sk_buff *skb, u16 vlan_tag) { +#ifdef CONFIG_INET if (skb_shinfo(skb)->gso_size) { switch (skb->protocol) { case htons(ETH_P_IP): @@ -1049,7 +1052,7 @@ static void qede_gro_receive(struct qede_dev *edev, ntohs(skb->protocol)); } } - +#endif skb_record_rx_queue(skb, fp->rss_id); qede_skb_receive(edev, fp, skb, vlan_tag); } -- cgit v1.2.3 From 656e705243fd0c2864b89634ea16ed444ef64dc6 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Tue, 8 Mar 2016 11:29:55 +0100 Subject: net-next: mediatek: add support for MT7623 ethernet Add ethernet support for MediaTek SoCs from the MT7623 family. These have dual GMAC. Depending on the exact version, there might be a built-in Gigabit switch (MT7530). The core does not have the typical DMA ring setup. Instead there is a linked list that we add descriptors to. There is only one linked list that both MACs use together. There is a special field inside the TX descriptors called the VQID. This allows us to assign packets to different internal queues. By using a separate id for each MAC we are able to get deterministic results for BQL. Additionally we need to provide the core with a block of scratch memory that is the same size as the RX ring and data buffer. This is really needed to make the HW datapath work. Although the driver does not support this yet, we still need to assign the memory and tell the core about it for RX to work. Signed-off-by: Felix Fietkau Signed-off-by: Michael Lee Signed-off-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 1807 +++++++++++++++++++++++++++ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 421 +++++++ 2 files changed, 2228 insertions(+) create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c create mode 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c new file mode 100644 index 000000000000..ba3afa5d4640 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -0,0 +1,1807 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2009-2016 John Crispin + * Copyright (C) 2009-2016 Felix Fietkau + * Copyright (C) 2013-2016 Michael Lee + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mtk_eth_soc.h" + +static int mtk_msg_level = -1; +module_param_named(msg_level, mtk_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); + +#define MTK_ETHTOOL_STAT(x) { #x, \ + offsetof(struct mtk_hw_stats, x) / sizeof(u64) } + +/* strings used by ethtool */ +static const struct mtk_ethtool_stats { + char str[ETH_GSTRING_LEN]; + u32 offset; +} mtk_ethtool_stats[] = { + MTK_ETHTOOL_STAT(tx_bytes), + MTK_ETHTOOL_STAT(tx_packets), + MTK_ETHTOOL_STAT(tx_skip), + MTK_ETHTOOL_STAT(tx_collisions), + MTK_ETHTOOL_STAT(rx_bytes), + MTK_ETHTOOL_STAT(rx_packets), + MTK_ETHTOOL_STAT(rx_overflow), + MTK_ETHTOOL_STAT(rx_fcs_errors), + MTK_ETHTOOL_STAT(rx_short_errors), + MTK_ETHTOOL_STAT(rx_long_errors), + MTK_ETHTOOL_STAT(rx_checksum_errors), + MTK_ETHTOOL_STAT(rx_flow_control_packets), +}; + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) +{ + __raw_writel(val, eth->base + reg); +} + +u32 mtk_r32(struct mtk_eth *eth, unsigned reg) +{ + return __raw_readl(eth->base + reg); +} + +static int mtk_mdio_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) + return 0; + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) + break; + usleep_range(10, 20); + } + + dev_err(eth->dev, "mdio: MDIO timeout\n"); + return -1; +} + +u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, + u32 phy_register, u32 write_data) +{ + if (mtk_mdio_busy_wait(eth)) + return -1; + + write_data &= 0xffff; + + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | + (phy_register << PHY_IAC_REG_SHIFT) | + (phy_addr << PHY_IAC_ADDR_SHIFT) | write_data, + MTK_PHY_IAC); + + if (mtk_mdio_busy_wait(eth)) + return -1; + + return 0; +} + +u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) +{ + u32 d; + + if (mtk_mdio_busy_wait(eth)) + return 0xffff; + + mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | + (phy_reg << PHY_IAC_REG_SHIFT) | + (phy_addr << PHY_IAC_ADDR_SHIFT), + MTK_PHY_IAC); + + if (mtk_mdio_busy_wait(eth)) + return 0xffff; + + d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; + + return d; +} + +static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, + int phy_reg, u16 val) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_write(eth, phy_addr, phy_reg, val); +} + +static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_read(eth, phy_addr, phy_reg); +} + +static void mtk_phy_link_adjust(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | + MAC_MCR_BACKPR_EN; + + switch (mac->phy_dev->speed) { + case SPEED_1000: + mcr |= MAC_MCR_SPEED_1000; + break; + case SPEED_100: + mcr |= MAC_MCR_SPEED_100; + break; + }; + + if (mac->phy_dev->link) + mcr |= MAC_MCR_FORCE_LINK; + + if (mac->phy_dev->duplex) + mcr |= MAC_MCR_FORCE_DPX; + + if (mac->phy_dev->pause) + mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; + + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); + + if (mac->phy_dev->link) + netif_carrier_on(dev); + else + netif_carrier_off(dev); +} + +static int mtk_phy_connect_node(struct mtk_eth *eth, struct 
mtk_mac *mac, + struct device_node *phy_node) +{ + const __be32 *_addr = NULL; + struct phy_device *phydev; + int phy_mode, addr; + + _addr = of_get_property(phy_node, "reg", NULL); + + if (!_addr || (be32_to_cpu(*_addr) >= 0x20)) { + pr_err("%s: invalid phy address\n", phy_node->name); + return -EINVAL; + } + addr = be32_to_cpu(*_addr); + phy_mode = of_get_phy_mode(phy_node); + if (phy_mode < 0) { + dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode); + return -EINVAL; + } + + phydev = of_phy_connect(eth->netdev[mac->id], phy_node, + mtk_phy_link_adjust, 0, phy_mode); + if (IS_ERR(phydev)) { + dev_err(eth->dev, "could not connect to PHY\n"); + return PTR_ERR(phydev); + } + + dev_info(eth->dev, + "connected mac %d to PHY at %s [uid=%08x, driver=%s]\n", + mac->id, phydev_name(phydev), phydev->phy_id, + phydev->drv->name); + + mac->phy_dev = phydev; + + return 0; +} + +static int mtk_phy_connect(struct mtk_mac *mac) +{ + struct mtk_eth *eth = mac->hw; + struct device_node *np; + u32 val, ge_mode; + + np = of_parse_phandle(mac->of_node, "phy-handle", 0); + if (!np) + return -ENODEV; + + switch (of_get_phy_mode(np)) { + case PHY_INTERFACE_MODE_RGMII: + ge_mode = 0; + break; + case PHY_INTERFACE_MODE_MII: + ge_mode = 1; + break; + case PHY_INTERFACE_MODE_RMII: + ge_mode = 2; + break; + default: + dev_err(eth->dev, "invalid phy_mode\n"); + return -1; + } + + /* put the gmac into the right mode */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); + val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + mtk_phy_connect_node(eth, mac, np); + mac->phy_dev->autoneg = AUTONEG_ENABLE; + mac->phy_dev->speed = 0; + mac->phy_dev->duplex = 0; + mac->phy_dev->supported &= PHY_BASIC_FEATURES; + mac->phy_dev->advertising = mac->phy_dev->supported | + ADVERTISED_Autoneg; + phy_start_aneg(mac->phy_dev); + + return 0; +} + +static int mtk_mdio_init(struct mtk_eth *eth) +{ + struct device_node *mii_np; + int err; + + mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); + if (!mii_np) { + dev_err(eth->dev, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + err = 0; + goto err_put_node; + } + + eth->mii_bus = mdiobus_alloc(); + if (!eth->mii_bus) { + err = -ENOMEM; + goto err_put_node; + } + + eth->mii_bus->name = "mdio"; + eth->mii_bus->read = mtk_mdio_read; + eth->mii_bus->write = mtk_mdio_write; + eth->mii_bus->priv = eth; + eth->mii_bus->parent = eth->dev; + + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name); + err = of_mdiobus_register(eth->mii_bus, mii_np); + if (err) + goto err_free_bus; + + return 0; + +err_free_bus: + kfree(eth->mii_bus); + +err_put_node: + of_node_put(mii_np); + eth->mii_bus = NULL; + return err; +} + +static void mtk_mdio_cleanup(struct mtk_eth *eth) +{ + if (!eth->mii_bus) + return; + + mdiobus_unregister(eth->mii_bus); + of_node_put(eth->mii_bus->dev.of_node); + kfree(eth->mii_bus); +} + +static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) +{ + u32 val; + + val = mtk_r32(eth, MTK_QDMA_INT_MASK); + mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK); + /* flush write */ + mtk_r32(eth, MTK_QDMA_INT_MASK); +} + +static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask) +{ + u32 val; + + val = mtk_r32(eth, MTK_QDMA_INT_MASK); + mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK); + /* flush write */ + mtk_r32(eth, MTK_QDMA_INT_MASK); +} + +static int mtk_set_mac_address(struct net_device *dev, void *p) +{ + int 
ret = eth_mac_addr(dev, p); + struct mtk_mac *mac = netdev_priv(dev); + const char *macaddr = dev->dev_addr; + unsigned long flags; + + if (ret) + return ret; + + spin_lock_irqsave(&mac->hw->page_lock, flags); + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MTK_GDMA_MAC_ADRH(mac->id)); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MTK_GDMA_MAC_ADRL(mac->id)); + spin_unlock_irqrestore(&mac->hw->page_lock, flags); + + return 0; +} + +void mtk_stats_update_mac(struct mtk_mac *mac) +{ + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int base = MTK_GDM1_TX_GBCNT; + u64 stats; + + base += hw_stats->reg_offset; + + u64_stats_update_begin(&hw_stats->syncp); + + hw_stats->rx_bytes += mtk_r32(mac->hw, base); + stats = mtk_r32(mac->hw, base + 0x04); + if (stats) + hw_stats->rx_bytes += (stats << 32); + hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08); + hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10); + hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14); + hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18); + hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c); + hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20); + hw_stats->rx_flow_control_packets += + mtk_r32(mac->hw, base + 0x24); + hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28); + hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c); + hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30); + stats = mtk_r32(mac->hw, base + 0x34); + if (stats) + hw_stats->tx_bytes += (stats << 32); + hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38); + u64_stats_update_end(&hw_stats->syncp); +} + +static void mtk_stats_update(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->mac[i] || !eth->mac[i]->hw_stats) + continue; + if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { + mtk_stats_update_mac(eth->mac[i]); + spin_unlock(ð->mac[i]->hw_stats->stats_lock); + } + } +} + +static struct rtnl_link_stats64 *mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int start; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock(&hw_stats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock(&hw_stats->stats_lock); + } + } + + do { + start = u64_stats_fetch_begin_irq(&hw_stats->syncp); + storage->rx_packets = hw_stats->rx_packets; + storage->tx_packets = hw_stats->tx_packets; + storage->rx_bytes = hw_stats->rx_bytes; + storage->tx_bytes = hw_stats->tx_bytes; + storage->collisions = hw_stats->tx_collisions; + storage->rx_length_errors = hw_stats->rx_short_errors + + hw_stats->rx_long_errors; + storage->rx_over_errors = hw_stats->rx_overflow; + storage->rx_crc_errors = hw_stats->rx_fcs_errors; + storage->rx_errors = hw_stats->rx_checksum_errors; + storage->tx_aborted_errors = hw_stats->tx_skip; + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); + + storage->tx_errors = dev->stats.tx_errors; + storage->rx_dropped = dev->stats.rx_dropped; + storage->tx_dropped = dev->stats.tx_dropped; + + return storage; +} + +static inline int mtk_max_frag_size(int mtu) +{ + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ + if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH) + mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static inline int mtk_max_buf_size(int 
frag_size) +{ + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + WARN_ON(buf_size < MTK_MAX_RX_LENGTH); + + return buf_size; +} + +static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, + struct mtk_rx_dma *dma_rxd) +{ + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); +} + +/* the qdma core needs scratch memory to be setup */ +static int mtk_init_fq_dma(struct mtk_eth *eth) +{ + unsigned int phy_ring_head, phy_ring_tail; + int cnt = MTK_DMA_SIZE; + dma_addr_t dma_addr; + int i; + + eth->scratch_ring = dma_alloc_coherent(eth->dev, + cnt * sizeof(struct mtk_tx_dma), + &phy_ring_head, + GFP_ATOMIC | __GFP_ZERO); + if (unlikely(!eth->scratch_ring)) + return -ENOMEM; + + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, + GFP_KERNEL); + dma_addr = dma_map_single(eth->dev, + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + return -ENOMEM; + + memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt); + phy_ring_tail = phy_ring_head + + (sizeof(struct mtk_tx_dma) * (cnt - 1)); + + for (i = 0; i < cnt; i++) { + eth->scratch_ring[i].txd1 = + (dma_addr + (i * MTK_QDMA_PAGE_SIZE)); + if (i < cnt - 1) + eth->scratch_ring[i].txd2 = (phy_ring_head + + ((i + 1) * sizeof(struct mtk_tx_dma))); + eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); + } + + mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD); + mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); + mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); + + return 0; +} + +static inline void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) +{ + void *ret = ring->dma; + + return ret + (desc - ring->phys); +} + +static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, + struct mtk_tx_dma *txd) +{ + int idx = txd - ring->dma; + + return &ring->buf[idx]; +} + +static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf) +{ + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { + dma_unmap_single(dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { + dma_unmap_page(dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + tx_buf->flags = 0; + if (tx_buf->skb && + (tx_buf->skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC)) + dev_kfree_skb_any(tx_buf->skb); + tx_buf->skb = NULL; +} + +static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, + int tx_num, struct mtk_tx_ring *ring, bool gso) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_dma *itxd, *txd; + struct mtk_tx_buf *tx_buf; + unsigned long flags; + dma_addr_t mapped_addr; + unsigned int nr_frags; + int i, n_desc = 1; + u32 txd4 = 0; + + itxd = ring->next_free; + if (itxd == ring->last_free) + return -ENOMEM; + + /* set the forward port */ + txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT; + + tx_buf = mtk_desc_to_tx_buf(ring, itxd); + memset(tx_buf, 0, sizeof(*tx_buf)); + + if (gso) + txd4 |= TX_DMA_TSO; + + /* TX Checksum offload */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + txd4 |= TX_DMA_CHKSUM; + + /* VLAN header offload */ + if (skb_vlan_tag_present(skb)) + txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb); + + mapped_addr = dma_map_single(&dev->dev, skb->data, + 
skb_headlen(skb), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + return -ENOMEM; + + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running ont he same ring so we need to lock + * the ring access + */ + spin_lock_irqsave(ð->page_lock, flags); + WRITE_ONCE(itxd->txd1, mapped_addr); + tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); + + /* TX SG offload */ + txd = itxd; + nr_frags = skb_shinfo(skb)->nr_frags; + for (i = 0; i < nr_frags; i++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; + unsigned int offset = 0; + int frag_size = skb_frag_size(frag); + + while (frag_size) { + bool last_frag = false; + unsigned int frag_map_size; + + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + if (txd == ring->last_free) + goto err_dma; + + n_desc++; + frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN); + mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, + frag_map_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) + goto err_dma; + + if (i == nr_frags - 1 && + (frag_size - frag_map_size) == 0) + last_frag = true; + + WRITE_ONCE(txd->txd1, mapped_addr); + WRITE_ONCE(txd->txd3, (TX_DMA_SWC | + TX_DMA_PLEN0(frag_map_size) | + last_frag * TX_DMA_LS0) | + mac->id); + WRITE_ONCE(txd->txd4, 0); + + tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC; + tx_buf = mtk_desc_to_tx_buf(ring, txd); + memset(tx_buf, 0, sizeof(*tx_buf)); + + tx_buf->flags |= MTK_TX_FLAGS_PAGE0; + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); + frag_size -= frag_map_size; + offset += frag_map_size; + } + } + + /* store skb to cleanup */ + tx_buf->skb = skb; + + WRITE_ONCE(itxd->txd4, txd4); + WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) | + (!nr_frags * TX_DMA_LS0))); + + spin_unlock_irqrestore(ð->page_lock, flags); + + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); + + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); + atomic_sub(n_desc, &ring->free_count); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) + mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); + + return 0; + +err_dma: + do { + tx_buf = mtk_desc_to_tx_buf(ring, txd); + + /* unmap dma */ + mtk_tx_unmap(&dev->dev, tx_buf); + + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); + } while (itxd != txd); + + return -ENOMEM; +} + +static inline int mtk_cal_txd_req(struct sk_buff *skb) +{ + int i, nfrags; + struct skb_frag_struct *frag; + + nfrags = 1; + if (skb_is_gso(skb)) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nfrags += DIV_ROUND_UP(frag->size, MTK_TX_DMA_BUF_LEN); + } + } else { + nfrags += skb_shinfo(skb)->nr_frags; + } + + return DIV_ROUND_UP(nfrags, 2); +} + +static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_ring *ring = ð->tx_ring; + struct net_device_stats *stats = &dev->stats; + bool gso = false; + int tx_num; + + tx_num = mtk_cal_txd_req(skb); + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { + netif_stop_queue(dev); + netif_err(eth, tx_queued, dev, + "Tx Ring full when queue awake!\n"); + return 
NETDEV_TX_BUSY; + } + + /* TSO: fill MSS info in tcp checksum field */ + if (skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) { + netif_warn(eth, tx_err, dev, + "GSO expand head fail.\n"); + goto drop; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + gso = true; + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); + } + } + + if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) + goto drop; + + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) { + netif_stop_queue(dev); + if (unlikely(atomic_read(&ring->free_count) > + ring->thresh)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +drop: + stats->tx_dropped++; + dev_kfree_skb(skb); + return NETDEV_TX_OK; +} + +static int mtk_poll_rx(struct napi_struct *napi, int budget, + struct mtk_eth *eth, u32 rx_intr) +{ + struct mtk_rx_ring *ring = ð->rx_ring; + int idx = ring->calc_idx; + struct sk_buff *skb; + u8 *data, *new_data; + struct mtk_rx_dma *rxd, trxd; + int done = 0; + + while (done < budget) { + struct net_device *netdev; + unsigned int pktlen; + dma_addr_t dma_addr; + int mac = 0; + + idx = NEXT_RX_DESP_IDX(idx); + rxd = &ring->dma[idx]; + data = ring->data[idx]; + + mtk_rx_get_desc(&trxd, rxd); + if (!(trxd.rxd2 & RX_DMA_DONE)) + break; + + /* find out which mac the packet come from. values start at 1 */ + mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) & + RX_DMA_FPORT_MASK; + mac--; + + netdev = eth->netdev[mac]; + + /* alloc new buffer */ + new_data = napi_alloc_frag(ring->frag_size); + if (unlikely(!new_data)) { + netdev->stats.rx_dropped++; + goto release_desc; + } + dma_addr = dma_map_single(ð->netdev[mac]->dev, + new_data + NET_SKB_PAD, + ring->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { + skb_free_frag(new_data); + goto release_desc; + } + + /* receive data */ + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { + put_page(virt_to_head_page(new_data)); + goto release_desc; + } + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + + dma_unmap_single(&netdev->dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); + skb->dev = netdev; + skb_put(skb, pktlen); + if (trxd.rxd4 & RX_DMA_L4_VALID) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, netdev); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX && + RX_DMA_VID(trxd.rxd3)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + RX_DMA_VID(trxd.rxd3)); + napi_gro_receive(napi, skb); + + ring->data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; + +release_desc: + rxd->rxd2 = RX_DMA_PLEN0(ring->buf_size); + + ring->calc_idx = idx; + /* make sure that all changes to the dma ring are flushed before + * we continue + */ + wmb(); + mtk_w32(eth, ring->calc_idx, MTK_QRX_CRX_IDX0); + done++; + } + + if (done < budget) + mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS); + + return done; +} + +static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma *desc; + struct sk_buff *skb; + struct mtk_tx_buf *tx_buf; + int total = 0, done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + u32 cpu, dma; + static int condition; + int i; + + memset(done, 0, sizeof(done)); + memset(bytes, 0, sizeof(bytes)); + + cpu = mtk_r32(eth, MTK_QTX_CRX_PTR); + dma = mtk_r32(eth, MTK_QTX_DRX_PTR); + + desc = mtk_qdma_phys_to_virt(ring, cpu); + + while ((cpu != dma) && budget) { + u32 next_cpu = desc->txd2; + int mac; + + desc = 
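/*
 * Editor's note - illustrative only: the RX loop above advances through
 * the ring with NEXT_RX_DESP_IDX(), which relies on MTK_DMA_SIZE (256)
 * being a power of two so that a mask replaces the modulo:
 */
#define RING_SIZE 256				/* must stay a power of two */

static unsigned int next_rx_idx(unsigned int idx)
{
	return (idx + 1) & (RING_SIZE - 1);	/* 255 wraps back to 0 */
}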
mtk_qdma_phys_to_virt(ring, desc->txd2); + if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) + break; + + mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) & + TX_DMA_FPORT_MASK; + mac--; + + tx_buf = mtk_desc_to_tx_buf(ring, desc); + skb = tx_buf->skb; + if (!skb) { + condition = 1; + break; + } + + if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) { + bytes[mac] += skb->len; + done[mac]++; + budget--; + } + mtk_tx_unmap(eth->dev, tx_buf); + + ring->last_free->txd2 = next_cpu; + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = next_cpu; + } + + mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || !done[i]) + continue; + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); + total += done[i]; + } + + /* read hw index again make sure no new tx packet */ + if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR)) + *tx_again = true; + else + mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS); + + if (!total) + return 0; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || + unlikely(!netif_queue_stopped(eth->netdev[i]))) + continue; + if (atomic_read(&ring->free_count) > ring->thresh) + netif_wake_queue(eth->netdev[i]); + } + + return total; +} + +static int mtk_poll(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); + u32 status, status2, mask, tx_intr, rx_intr, status_intr; + int tx_done, rx_done; + bool tx_again = false; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + status2 = mtk_r32(eth, MTK_INT_STATUS2); + tx_intr = MTK_TX_DONE_INT; + rx_intr = MTK_RX_DONE_INT; + status_intr = (MTK_GDM1_AF | MTK_GDM2_AF); + tx_done = 0; + rx_done = 0; + tx_again = 0; + + if (status & tx_intr) + tx_done = mtk_poll_tx(eth, budget, &tx_again); + + if (status & rx_intr) + rx_done = mtk_poll_rx(napi, budget, eth, rx_intr); + + if (unlikely(status2 & status_intr)) { + mtk_stats_update(eth); + mtk_w32(eth, status_intr, MTK_INT_STATUS2); + } + + if (unlikely(netif_msg_intr(eth))) { + mask = mtk_r32(eth, MTK_QDMA_INT_MASK); + netdev_info(eth->netdev[0], + "done tx %d, rx %d, intr 0x%08x/0x%x\n", + tx_done, rx_done, status, mask); + } + + if (tx_again || rx_done == budget) + return budget; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + if (status & (tx_intr | rx_intr)) + return budget; + + napi_complete(napi); + mtk_irq_enable(eth, tx_intr | rx_intr); + + return rx_done; +} + +static int mtk_tx_alloc(struct mtk_eth *eth) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + int i, sz = sizeof(*ring->dma); + + ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), + GFP_KERNEL); + if (!ring->buf) + goto no_tx_mem; + + ring->dma = dma_alloc_coherent(eth->dev, + MTK_DMA_SIZE * sz, + &ring->phys, + GFP_ATOMIC | __GFP_ZERO); + if (!ring->dma) + goto no_tx_mem; + + memset(ring->dma, 0, MTK_DMA_SIZE * sz); + for (i = 0; i < MTK_DMA_SIZE; i++) { + int next = (i + 1) % MTK_DMA_SIZE; + u32 next_ptr = ring->phys + next * sz; + + ring->dma[i].txd2 = next_ptr; + ring->dma[i].txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + } + + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); + ring->next_free = &ring->dma[0]; + ring->last_free = &ring->dma[MTK_DMA_SIZE - 2]; + ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2, + MAX_SKB_FRAGS); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); + mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_CRX_PTR); + mtk_w32(eth, + 
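/*
 * Editor's note - illustrative only: the TX paths above apply the usual
 * stop/wake hysteresis around ring->free_count - mtk_start_xmit() stops
 * the queue when it dips to the threshold, and mtk_poll_tx() wakes it
 * again only once reclaim has pushed the count back above it.  Reduced to
 * two predicates:
 */
static int should_stop_queue(int free_count, int thresh)
{
	return free_count <= thresh;		/* mtk_start_xmit() side */
}

static int should_wake_queue(int free_count, int thresh, int stopped)
{
	return stopped && free_count > thresh;	/* mtk_poll_tx() side */
}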
ring->phys + ((MTK_DMA_SIZE - 1) * sz), + MTK_QTX_DRX_PTR); + + return 0; + +no_tx_mem: + return -ENOMEM; +} + +static void mtk_tx_clean(struct mtk_eth *eth) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + int i; + + if (ring->buf) { + for (i = 0; i < MTK_DMA_SIZE; i++) + mtk_tx_unmap(eth->dev, &ring->buf[i]); + kfree(ring->buf); + ring->buf = NULL; + } + + if (ring->dma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); + ring->dma = NULL; + } +} + +static int mtk_rx_alloc(struct mtk_eth *eth) +{ + struct mtk_rx_ring *ring = ð->rx_ring; + int i; + + ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN); + ring->buf_size = mtk_max_buf_size(ring->frag_size); + ring->data = kcalloc(MTK_DMA_SIZE, sizeof(*ring->data), + GFP_KERNEL); + if (!ring->data) + return -ENOMEM; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (!ring->data[i]) + return -ENOMEM; + } + + ring->dma = dma_alloc_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + &ring->phys, + GFP_ATOMIC | __GFP_ZERO); + if (!ring->dma) + return -ENOMEM; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + dma_addr_t dma_addr = dma_map_single(eth->dev, + ring->data[i] + NET_SKB_PAD, + ring->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dev, dma_addr))) + return -ENOMEM; + ring->dma[i].rxd1 = (unsigned int)dma_addr; + + ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size); + } + ring->calc_idx = MTK_DMA_SIZE - 1; + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + mtk_w32(eth, eth->rx_ring.phys, MTK_QRX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MTK_QRX_MAX_CNT0); + mtk_w32(eth, eth->rx_ring.calc_idx, MTK_QRX_CRX_IDX0); + mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0)); + + return 0; +} + +static void mtk_rx_clean(struct mtk_eth *eth) +{ + struct mtk_rx_ring *ring = ð->rx_ring; + int i; + + if (ring->data && ring->dma) { + for (i = 0; i < MTK_DMA_SIZE; i++) { + if (!ring->data[i]) + continue; + if (!ring->dma[i].rxd1) + continue; + dma_unmap_single(eth->dev, + ring->dma[i].rxd1, + ring->buf_size, + DMA_FROM_DEVICE); + skb_free_frag(ring->data[i]); + } + kfree(ring->data); + ring->data = NULL; + } + + if (ring->dma) { + dma_free_coherent(eth->dev, + MTK_DMA_SIZE * sizeof(*ring->dma), + ring->dma, + ring->phys); + ring->dma = NULL; + } +} + +/* wait for DMA to finish whatever it is doing before we start using it again */ +static int mtk_dma_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_r32(eth, MTK_QDMA_GLO_CFG) & + (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY))) + return 0; + if (time_after(jiffies, t_start + MTK_DMA_BUSY_TIMEOUT)) + break; + } + + dev_err(eth->dev, "DMA init timeout\n"); + return -1; +} + +static int mtk_dma_init(struct mtk_eth *eth) +{ + int err; + + if (mtk_dma_busy_wait(eth)) + return -EBUSY; + + /* QDMA needs scratch memory for internal reordering of the + * descriptors + */ + err = mtk_init_fq_dma(eth); + if (err) + return err; + + err = mtk_tx_alloc(eth); + if (err) + return err; + + err = mtk_rx_alloc(eth); + if (err) + return err; + + /* Enable random early drop and set drop threshold automatically */ + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN, + MTK_QDMA_FC_THRES); + mtk_w32(eth, 0x0, MTK_QDMA_HRED2); + + return 0; +} + +static void mtk_dma_free(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) + if 
(eth->netdev[i]) + netdev_reset_queue(eth->netdev[i]); + mtk_tx_clean(eth); + mtk_rx_clean(eth); + kfree(eth->scratch_head); +} + +static void mtk_tx_timeout(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + eth->netdev[mac->id]->stats.tx_errors++; + netif_err(eth, tx_err, dev, + "transmit timed out\n"); + schedule_work(&mac->pending_work); +} + +static irqreturn_t mtk_handle_irq(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + u32 status; + + status = mtk_r32(eth, MTK_QMTK_INT_STATUS); + if (unlikely(!status)) + return IRQ_NONE; + + if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) { + if (likely(napi_schedule_prep(ð->rx_napi))) + __napi_schedule(ð->rx_napi); + } else { + mtk_w32(eth, status, MTK_QMTK_INT_STATUS); + } + mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT)); + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mtk_poll_controller(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT; + + mtk_irq_disable(eth, int_mask); + mtk_handle_irq(dev->irq, dev); + mtk_irq_enable(eth, int_mask); +} +#endif + +static int mtk_start_dma(struct mtk_eth *eth) +{ + int err; + + err = mtk_dma_init(eth); + if (err) { + mtk_dma_free(eth); + return err; + } + + mtk_w32(eth, + MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN | + MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS | + MTK_RX_BT_32DWORDS, + MTK_QDMA_GLO_CFG); + + return 0; +} + +static int mtk_open(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + /* we run 2 netdevs on the same dma ring so we only bring it up once */ + if (!atomic_read(ð->dma_refcnt)) { + int err = mtk_start_dma(eth); + + if (err) + return err; + + napi_enable(ð->rx_napi); + mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + } + atomic_inc(ð->dma_refcnt); + + phy_start(mac->phy_dev); + netif_start_queue(dev); + + return 0; +} + +static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) +{ + unsigned long flags; + u32 val; + int i; + + /* stop the dma engine */ + spin_lock_irqsave(ð->page_lock, flags); + val = mtk_r32(eth, glo_cfg); + mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), + glo_cfg); + spin_unlock_irqrestore(ð->page_lock, flags); + + /* wait for dma stop */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, glo_cfg); + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { + msleep(20); + continue; + } + break; + } +} + +static int mtk_stop(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + netif_tx_disable(dev); + phy_stop(mac->phy_dev); + + /* only shutdown DMA if this is the last user */ + if (!atomic_dec_and_test(ð->dma_refcnt)) + return 0; + + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + napi_disable(ð->rx_napi); + + mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); + + mtk_dma_free(eth); + + return 0; +} + +static int __init mtk_hw_init(struct mtk_eth *eth) +{ + int err, i; + + /* reset the frame engine */ + reset_control_assert(eth->rstc); + usleep_range(10, 20); + reset_control_deassert(eth->rstc); + usleep_range(10, 20); + + /* Set GE2 driving and slew rate */ + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); + + /* set GE2 TDSEL */ + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); + + /* set GE2 TUNE */ + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); + + /* GE1, Force 1000M/FD, FC ON */ + mtk_w32(eth, MAC_MCR_FIXED_LINK, 
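/*
 * Editor's note - an illustrative sketch, not part of the patch: both MACs
 * share a single QDMA engine, so mtk_open()/mtk_stop() above gate the
 * hardware on a reference count.  Only the first open starts DMA, NAPI and
 * interrupts, and only the last close tears them down.  The lifetime rule,
 * with a plain counter standing in for eth->dma_refcnt:
 */
static int dma_users;

static int shared_open(void)
{
	if (dma_users++ == 0) {
		/* first user: mtk_start_dma(), napi_enable(), irq enable */
	}
	return 0;
}

static int shared_stop(void)
{
	if (--dma_users > 0)
		return 0;	/* the other netdev still needs DMA */
	/* last user: irq disable, napi_disable(), stop and free the rings */
	return 0;
}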
MTK_MAC_MCR(0)); + + /* GE2, Force 1000M/FD, FC ON */ + mtk_w32(eth, MAC_MCR_FIXED_LINK, MTK_MAC_MCR(1)); + + /* Enable RX VLan Offloading */ + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + + err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0, + dev_name(eth->dev), eth); + if (err) + return err; + + err = mtk_mdio_init(eth); + if (err) + return err; + + /* disable delay and normal interrupt */ + mtk_w32(eth, 0, MTK_QDMA_DELAY_INT); + mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT); + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); + mtk_w32(eth, 0, MTK_RST_GL); + + /* FE int grouping */ + mtk_w32(eth, 0, MTK_FE_INT_GRP); + + for (i = 0; i < 2; i++) { + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + + /* setup the forward port to send frame to QDMA */ + val &= ~0xffff; + val |= 0x5555; + + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + + /* setup the mac dma */ + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); + } + + return 0; +} + +static int __init mtk_init(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + const char *mac_addr; + + mac_addr = of_get_mac_address(mac->of_node); + if (mac_addr) + ether_addr_copy(dev->dev_addr, mac_addr); + + /* If the mac address is invalid, use random mac address */ + if (!is_valid_ether_addr(dev->dev_addr)) { + random_ether_addr(dev->dev_addr); + dev_err(eth->dev, "generated random MAC address %pM\n", + dev->dev_addr); + dev->addr_assign_type = NET_ADDR_RANDOM; + } + + return mtk_phy_connect(mac); +} + +static void mtk_uninit(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + phy_disconnect(mac->phy_dev); + mtk_mdio_cleanup(eth); + mtk_irq_disable(eth, ~0); + free_irq(dev->irq, dev); +} + +static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phy_mii_ioctl(mac->phy_dev, ifr, cmd); + default: + break; + } + + return -EOPNOTSUPP; +} + +static void mtk_pending_work(struct work_struct *work) +{ + struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work); + struct mtk_eth *eth = mac->hw; + struct net_device *dev = eth->netdev[mac->id]; + int err; + + rtnl_lock(); + mtk_stop(dev); + + err = mtk_open(dev); + if (err) { + netif_alert(eth, ifup, dev, + "Driver up/down cycle failed, closing device.\n"); + dev_close(dev); + } + rtnl_unlock(); +} + +static int mtk_cleanup(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + struct mtk_mac *mac = netdev_priv(eth->netdev[i]); + + if (!eth->netdev[i]) + continue; + + unregister_netdev(eth->netdev[i]); + free_netdev(eth->netdev[i]); + cancel_work_sync(&mac->pending_work); + } + + return 0; +} + +static int mtk_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + int err; + + err = phy_read_status(mac->phy_dev); + if (err) + return -ENODEV; + + return phy_ethtool_gset(mac->phy_dev, cmd); +} + +static int mtk_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + + if (cmd->phy_address != mac->phy_dev->mdio.addr) { + mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus, + cmd->phy_address); + if (!mac->phy_dev) + return -ENODEV; + } + + return phy_ethtool_sset(mac->phy_dev, cmd); +} + +static void mtk_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct mtk_mac 
*mac = netdev_priv(dev); + + strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); + strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); + info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); +} + +static u32 mtk_get_msglevel(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return mac->hw->msg_enable; +} + +static void mtk_set_msglevel(struct net_device *dev, u32 value) +{ + struct mtk_mac *mac = netdev_priv(dev); + + mac->hw->msg_enable = value; +} + +static int mtk_nway_reset(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return genphy_restart_aneg(mac->phy_dev); +} + +static u32 mtk_get_link(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + int err; + + err = genphy_update_link(mac->phy_dev); + if (err) + return ethtool_op_get_link(dev); + + return mac->phy_dev->link; +} + +static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { + memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + } +} + +static int mtk_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(mtk_ethtool_stats); + default: + return -EOPNOTSUPP; + } +} + +static void mtk_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hwstats = mac->hw_stats; + u64 *data_src, *data_dst; + unsigned int start; + int i; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock(&hwstats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock(&hwstats->stats_lock); + } + } + + do { + data_src = (u64*)hwstats; + data_dst = data; + start = u64_stats_fetch_begin_irq(&hwstats->syncp); + + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) + *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); +} + +static struct ethtool_ops mtk_ethtool_ops = { + .get_settings = mtk_get_settings, + .set_settings = mtk_set_settings, + .get_drvinfo = mtk_get_drvinfo, + .get_msglevel = mtk_get_msglevel, + .set_msglevel = mtk_set_msglevel, + .nway_reset = mtk_nway_reset, + .get_link = mtk_get_link, + .get_strings = mtk_get_strings, + .get_sset_count = mtk_get_sset_count, + .get_ethtool_stats = mtk_get_ethtool_stats, +}; + +static const struct net_device_ops mtk_netdev_ops = { + .ndo_init = mtk_init, + .ndo_uninit = mtk_uninit, + .ndo_open = mtk_open, + .ndo_stop = mtk_stop, + .ndo_start_xmit = mtk_start_xmit, + .ndo_set_mac_address = mtk_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_do_ioctl = mtk_do_ioctl, + .ndo_change_mtu = eth_change_mtu, + .ndo_tx_timeout = mtk_tx_timeout, + .ndo_get_stats64 = mtk_get_stats64, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mtk_poll_controller, +#endif +}; + +static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) +{ + struct mtk_mac *mac; + const __be32 *_id = of_get_property(np, "reg", NULL); + int id, err; + + if (!_id) { + dev_err(eth->dev, "missing mac id\n"); + return -EINVAL; + } + + id = be32_to_cpup(_id); + if (id >= MTK_MAC_COUNT) { + dev_err(eth->dev, "%d is not a valid mac id\n", id); + return -EINVAL; + } + + if (eth->netdev[id]) { + dev_err(eth->dev, "duplicate mac id found: %d\n", id); + return -EINVAL; + } + + eth->netdev[id] = 
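/*
 * Editor's note - a simplified sketch, not part of the patch (real code
 * must use the u64_stats_sync helpers, which also handle memory ordering):
 * mtk_get_ethtool_stats() above snapshots the counters with a sequence
 * retry loop - copy every value, then redo the copy if the writer bumped
 * the sequence in the meantime.  The shape of that loop:
 */
struct stats_model {
	unsigned int seq;		/* incremented around every update */
	unsigned long long vals[4];
};

static void stats_snapshot(const struct stats_model *s,
			   unsigned long long *out)
{
	unsigned int start;
	int i;

	do {
		start = s->seq;		/* u64_stats_fetch_begin_irq()     */
		for (i = 0; i < 4; i++)
			out[i] = s->vals[i];
	} while (start != s->seq);	/* u64_stats_fetch_retry_irq() side */
}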
alloc_etherdev(sizeof(*mac)); + if (!eth->netdev[id]) { + dev_err(eth->dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + mac = netdev_priv(eth->netdev[id]); + eth->mac[id] = mac; + mac->id = id; + mac->hw = eth; + mac->of_node = np; + INIT_WORK(&mac->pending_work, mtk_pending_work); + + mac->hw_stats = devm_kzalloc(eth->dev, + sizeof(*mac->hw_stats), + GFP_KERNEL); + if (!mac->hw_stats) { + dev_err(eth->dev, "failed to allocate counter memory\n"); + err = -ENOMEM; + goto free_netdev; + } + spin_lock_init(&mac->hw_stats->stats_lock); + mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + + SET_NETDEV_DEV(eth->netdev[id], eth->dev); + eth->netdev[id]->netdev_ops = &mtk_netdev_ops; + eth->netdev[id]->base_addr = (unsigned long)eth->base; + eth->netdev[id]->vlan_features = MTK_HW_FEATURES & + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + eth->netdev[id]->features |= MTK_HW_FEATURES; + eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; + + err = register_netdev(eth->netdev[id]); + if (err) { + dev_err(eth->dev, "error bringing up device\n"); + goto free_netdev; + } + eth->netdev[id]->irq = eth->irq; + netif_info(eth, probe, eth->netdev[id], + "mediatek frame engine at 0x%08lx, irq %d\n", + eth->netdev[id]->base_addr, eth->netdev[id]->irq); + + return 0; + +free_netdev: + free_netdev(eth->netdev[id]); + return err; +} + +static int mtk_probe(struct platform_device *pdev) +{ + struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + struct device_node *mac_np; + const struct of_device_id *match; + struct mtk_soc_data *soc; + struct mtk_eth *eth; + int err; + + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + + device_reset(&pdev->dev); + + match = of_match_device(of_mtk_match, &pdev->dev); + soc = (struct mtk_soc_data *)match->data; + + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); + if (!eth) + return -ENOMEM; + + eth->base = devm_ioremap_resource(&pdev->dev, res); + if (!eth->base) + return -EADDRNOTAVAIL; + + spin_lock_init(ð->page_lock); + + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,ethsys"); + if (IS_ERR(eth->ethsys)) { + dev_err(&pdev->dev, "no ethsys regmap found\n"); + return PTR_ERR(eth->ethsys); + } + + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,pctl"); + if (IS_ERR(eth->pctl)) { + dev_err(&pdev->dev, "no pctl regmap found\n"); + return PTR_ERR(eth->pctl); + } + + eth->rstc = devm_reset_control_get(&pdev->dev, "eth"); + if (IS_ERR(eth->rstc)) { + dev_err(&pdev->dev, "no eth reset found\n"); + return PTR_ERR(eth->rstc); + } + + eth->irq = platform_get_irq(pdev, 0); + if (eth->irq < 0) { + dev_err(&pdev->dev, "no IRQ resource found\n"); + return -ENXIO; + } + + eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif"); + eth->clk_esw = devm_clk_get(&pdev->dev, "esw"); + eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1"); + eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2"); + if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) || + IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif)) + return -ENODEV; + + clk_prepare_enable(eth->clk_ethif); + clk_prepare_enable(eth->clk_esw); + clk_prepare_enable(eth->clk_gp1); + clk_prepare_enable(eth->clk_gp2); + + eth->dev = &pdev->dev; + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + + err = mtk_hw_init(eth); + if (err) + return err; + + for_each_child_of_node(pdev->dev.of_node, mac_np) { + if (!of_device_is_compatible(mac_np, + "mediatek,eth-mac")) + continue; + + if 
(!of_device_is_available(mac_np)) + continue; + + err = mtk_add_mac(eth, mac_np); + if (err) + goto err_free_dev; + } + + /* we run 2 devices on the same DMA ring so we need a dummy device + * for NAPI to work + */ + init_dummy_netdev(ð->dummy_dev); + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_poll, + MTK_NAPI_WEIGHT); + + platform_set_drvdata(pdev, eth); + + return 0; + +err_free_dev: + mtk_cleanup(eth); + return err; +} + +static int mtk_remove(struct platform_device *pdev) +{ + struct mtk_eth *eth = platform_get_drvdata(pdev); + + clk_disable_unprepare(eth->clk_ethif); + clk_disable_unprepare(eth->clk_esw); + clk_disable_unprepare(eth->clk_gp1); + clk_disable_unprepare(eth->clk_gp2); + + netif_napi_del(ð->rx_napi); + mtk_cleanup(eth); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +const struct of_device_id of_mtk_match[] = { + { .compatible = "mediatek,mt7623-eth" }, + {}, +}; + +static struct platform_driver mtk_driver = { + .probe = mtk_probe, + .remove = mtk_remove, + .driver = { + .name = "mtk_soc_eth", + .owner = THIS_MODULE, + .of_match_table = of_mtk_match, + }, +}; + +module_platform_driver(mtk_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin "); +MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h new file mode 100644 index 000000000000..48a5292c8ed8 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -0,0 +1,421 @@ +/* This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2009-2016 John Crispin + * Copyright (C) 2009-2016 Felix Fietkau + * Copyright (C) 2013-2016 Michael Lee + */ + +#ifndef MTK_ETH_H +#define MTK_ETH_H + +#define MTK_QDMA_PAGE_SIZE 2048 +#define MTK_MAX_RX_LENGTH 1536 +#define MTK_TX_DMA_BUF_LEN 0x3fff +#define MTK_DMA_SIZE 256 +#define MTK_NAPI_WEIGHT 64 +#define MTK_MAC_COUNT 2 +#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) +#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) +#define MTK_DMA_DUMMY_DESC 0xffffffff +#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) +#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_SG | NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_IPV6_CSUM) +#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (MTK_DMA_SIZE - 1)) + +/* Frame Engine Global Reset Register */ +#define MTK_RST_GL 0x04 +#define RST_GL_PSE BIT(0) + +/* Frame Engine Interrupt Status Register */ +#define MTK_INT_STATUS2 0x08 +#define MTK_GDM1_AF BIT(28) +#define MTK_GDM2_AF BIT(29) + +/* Frame Engine Interrupt Grouping Register */ +#define MTK_FE_INT_GRP 0x20 + +/* CDMP Exgress Control Register */ +#define MTK_CDMP_EG_CTRL 0x404 + +/* GDM Exgress Control Register */ +#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_ICS_EN BIT(22) +#define MTK_GDMA_TCS_EN BIT(21) +#define MTK_GDMA_UCS_EN BIT(20) + +/* Unicast Filter MAC Address Register - Low */ +#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) + +/* Unicast Filter MAC Address Register - High */ +#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) + +/* QDMA TX Queue Configuration Registers */ +#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10)) +#define QDMA_RES_THRES 4 + +/* QDMA TX Queue Scheduler Registers */ +#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10)) + +/* QDMA RX Base Pointer Register */ +#define MTK_QRX_BASE_PTR0 0x1900 + +/* QDMA RX Maximum Count Register */ +#define MTK_QRX_MAX_CNT0 0x1904 + +/* QDMA RX CPU Pointer Register */ +#define MTK_QRX_CRX_IDX0 0x1908 + +/* QDMA RX DMA Pointer Register */ +#define MTK_QRX_DRX_IDX0 0x190C + +/* QDMA Global Configuration Register */ +#define MTK_QDMA_GLO_CFG 0x1A04 +#define MTK_RX_2B_OFFSET BIT(31) +#define MTK_RX_BT_32DWORDS (3 << 11) +#define MTK_TX_WB_DDONE BIT(6) +#define MTK_DMA_SIZE_16DWORDS (2 << 4) +#define MTK_RX_DMA_BUSY BIT(3) +#define MTK_TX_DMA_BUSY BIT(1) +#define MTK_RX_DMA_EN BIT(2) +#define MTK_TX_DMA_EN BIT(0) +#define MTK_DMA_BUSY_TIMEOUT HZ + +/* QDMA Reset Index Register */ +#define MTK_QDMA_RST_IDX 0x1A08 +#define MTK_PST_DRX_IDX0 BIT(16) + +/* QDMA Delay Interrupt Register */ +#define MTK_QDMA_DELAY_INT 0x1A0C + +/* QDMA Flow Control Register */ +#define MTK_QDMA_FC_THRES 0x1A10 +#define FC_THRES_DROP_MODE BIT(20) +#define FC_THRES_DROP_EN (7 << 16) +#define FC_THRES_MIN 0x4444 + +/* QDMA Interrupt Status Register */ +#define MTK_QMTK_INT_STATUS 0x1A18 +#define MTK_RX_DONE_INT1 BIT(17) +#define MTK_RX_DONE_INT0 BIT(16) +#define MTK_TX_DONE_INT3 BIT(3) +#define MTK_TX_DONE_INT2 BIT(2) +#define MTK_TX_DONE_INT1 BIT(1) +#define MTK_TX_DONE_INT0 BIT(0) +#define MTK_RX_DONE_INT (MTK_RX_DONE_INT0 | MTK_RX_DONE_INT1) +#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \ + MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3) + +/* QDMA Interrupt Status Register */ +#define MTK_QDMA_INT_MASK 0x1A1C + +/* QDMA Interrupt Mask Register */ 
+#define MTK_QDMA_HRED2 0x1A44 + +/* QDMA TX Forward CPU Pointer Register */ +#define MTK_QTX_CTX_PTR 0x1B00 + +/* QDMA TX Forward DMA Pointer Register */ +#define MTK_QTX_DTX_PTR 0x1B04 + +/* QDMA TX Release CPU Pointer Register */ +#define MTK_QTX_CRX_PTR 0x1B10 + +/* QDMA TX Release DMA Pointer Register */ +#define MTK_QTX_DRX_PTR 0x1B14 + +/* QDMA FQ Head Pointer Register */ +#define MTK_QDMA_FQ_HEAD 0x1B20 + +/* QDMA FQ Head Pointer Register */ +#define MTK_QDMA_FQ_TAIL 0x1B24 + +/* QDMA FQ Free Page Counter Register */ +#define MTK_QDMA_FQ_CNT 0x1B28 + +/* QDMA FQ Free Page Buffer Length Register */ +#define MTK_QDMA_FQ_BLEN 0x1B2C + +/* GMA1 Received Good Byte Count Register */ +#define MTK_GDM1_TX_GBCNT 0x2400 +#define MTK_STAT_OFFSET 0x40 + +/* QDMA descriptor txd4 */ +#define TX_DMA_CHKSUM (0x7 << 29) +#define TX_DMA_TSO BIT(28) +#define TX_DMA_FPORT_SHIFT 25 +#define TX_DMA_FPORT_MASK 0x7 +#define TX_DMA_INS_VLAN BIT(16) + +/* QDMA descriptor txd3 */ +#define TX_DMA_OWNER_CPU BIT(31) +#define TX_DMA_LS0 BIT(30) +#define TX_DMA_PLEN0(_x) (((_x) & MTK_TX_DMA_BUF_LEN) << 16) +#define TX_DMA_SWC BIT(14) +#define TX_DMA_SDL(_x) (((_x) & 0x3fff) << 16) + +/* QDMA descriptor rxd2 */ +#define RX_DMA_DONE BIT(31) +#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16) +#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff) + +/* QDMA descriptor rxd3 */ +#define RX_DMA_VID(_x) ((_x) & 0xfff) + +/* QDMA descriptor rxd4 */ +#define RX_DMA_L4_VALID BIT(24) +#define RX_DMA_FPORT_SHIFT 19 +#define RX_DMA_FPORT_MASK 0x7 + +/* PHY Indirect Access Control registers */ +#define MTK_PHY_IAC 0x10004 +#define PHY_IAC_ACCESS BIT(31) +#define PHY_IAC_READ BIT(19) +#define PHY_IAC_WRITE BIT(18) +#define PHY_IAC_START BIT(16) +#define PHY_IAC_ADDR_SHIFT 20 +#define PHY_IAC_REG_SHIFT 25 +#define PHY_IAC_TIMEOUT HZ + +/* Mac control registers */ +#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) +#define MAC_MCR_MAX_RX_1536 BIT(24) +#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16)) +#define MAC_MCR_FORCE_MODE BIT(15) +#define MAC_MCR_TX_EN BIT(14) +#define MAC_MCR_RX_EN BIT(13) +#define MAC_MCR_BACKOFF_EN BIT(9) +#define MAC_MCR_BACKPR_EN BIT(8) +#define MAC_MCR_FORCE_RX_FC BIT(5) +#define MAC_MCR_FORCE_TX_FC BIT(4) +#define MAC_MCR_SPEED_1000 BIT(3) +#define MAC_MCR_SPEED_100 BIT(2) +#define MAC_MCR_FORCE_DPX BIT(1) +#define MAC_MCR_FORCE_LINK BIT(0) +#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | \ + MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \ + MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \ + MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \ + MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \ + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK) + +/* GPIO port control registers for GMAC 2*/ +#define GPIO_OD33_CTRL8 0x4c0 +#define GPIO_BIAS_CTRL 0xed0 +#define GPIO_DRV_SEL10 0xf00 + +/* ethernet subsystem config register */ +#define ETHSYS_SYSCFG0 0x14 +#define SYSCFG0_GE_MASK 0x3 +#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) + +struct mtk_rx_dma { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; +} __packed __aligned(4); + +struct mtk_tx_dma { + unsigned int txd1; + unsigned int txd2; + unsigned int txd3; + unsigned int txd4; +} __packed __aligned(4); + +struct mtk_eth; +struct mtk_mac; + +/* struct mtk_hw_stats - the structure that holds the traffic statistics. + * @stats_lock: make sure that stats operations are atomic + * @reg_offset: the status register offset of the SoC + * @syncp: the refcount + * + * All of the supported SoCs have hardware counters for traffic statistics. 
+ * Whenever the status IRQ triggers we can read the latest stats from these + * counters and store them in this struct. + */ +struct mtk_hw_stats { + u64 tx_bytes; + u64 tx_packets; + u64 tx_skip; + u64 tx_collisions; + u64 rx_bytes; + u64 rx_packets; + u64 rx_overflow; + u64 rx_fcs_errors; + u64 rx_short_errors; + u64 rx_long_errors; + u64 rx_checksum_errors; + u64 rx_flow_control_packets; + + spinlock_t stats_lock; + u32 reg_offset; + struct u64_stats_sync syncp; +}; + +/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how + * memory was allocated so that it can be freed properly + */ +enum mtk_tx_flags { + MTK_TX_FLAGS_SINGLE0 = 0x01, + MTK_TX_FLAGS_PAGE0 = 0x02, +}; + +/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at + * by the TX descriptor s + * @skb: The SKB pointer of the packet being sent + * @dma_addr0: The base addr of the first segment + * @dma_len0: The length of the first segment + * @dma_addr1: The base addr of the second segment + * @dma_len1: The length of the second segment + */ +struct mtk_tx_buf { + struct sk_buff *skb; + u32 flags; + DEFINE_DMA_UNMAP_ADDR(dma_addr0); + DEFINE_DMA_UNMAP_LEN(dma_len0); + DEFINE_DMA_UNMAP_ADDR(dma_addr1); + DEFINE_DMA_UNMAP_LEN(dma_len1); +}; + +/* struct mtk_tx_ring - This struct holds info describing a TX ring + * @dma: The descriptor ring + * @buf: The memory pointed at by the ring + * @phys: The physical addr of tx_buf + * @next_free: Pointer to the next free descriptor + * @last_free: Pointer to the last free descriptor + * @thresh: The threshold of minimum amount of free descriptors + * @free_count: QDMA uses a linked list. Track how many free descriptors + * are present + */ +struct mtk_tx_ring { + struct mtk_tx_dma *dma; + struct mtk_tx_buf *buf; + dma_addr_t phys; + struct mtk_tx_dma *next_free; + struct mtk_tx_dma *last_free; + u16 thresh; + atomic_t free_count; +}; + +/* struct mtk_rx_ring - This struct holds info describing a RX ring + * @dma: The descriptor ring + * @data: The memory pointed at by the ring + * @phys: The physical addr of rx_buf + * @frag_size: How big can each fragment be + * @buf_size: The size of each packet buffer + * @calc_idx: The current head of ring + */ +struct mtk_rx_ring { + struct mtk_rx_dma *dma; + u8 **data; + dma_addr_t phys; + u16 frag_size; + u16 buf_size; + u16 calc_idx; +}; + +/* currently no SoC has more than 2 macs */ +#define MTK_MAX_DEVS 2 + +/* struct mtk_eth - This is the main datasructure for holding the state + * of the driver + * @dev: The device pointer + * @base: The mapped register i/o base + * @page_lock: Make sure that register operations are atomic + * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a + * dummy for NAPI to work + * @netdev: The netdev instances + * @mac: Each netdev is linked to a physical MAC + * @irq: The IRQ that we are using + * @msg_enable: Ethtool msg level + * @ethsys: The register map pointing at the range used to setup + * MII modes + * @pctl: The register map pointing at the range used to setup + * GMAC port drive/slew values + * @dma_refcnt: track how many netdevs are using the DMA engine + * @tx_ring: Pointer to the memore holding info about the TX ring + * @rx_ring: Pointer to the memore holding info about the RX ring + * @rx_napi: The NAPI struct + * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring + * @scratch_head: The scratch memory that scratch_ring points to. 
+ * @clk_ethif: The ethif clock + * @clk_esw: The switch clock + * @clk_gp1: The gmac1 clock + * @clk_gp2: The gmac2 clock + * @mii_bus: If there is a bus we need to create an instance for it + */ + +struct mtk_eth { + struct device *dev; + void __iomem *base; + struct reset_control *rstc; + spinlock_t page_lock; + struct net_device dummy_dev; + struct net_device *netdev[MTK_MAX_DEVS]; + struct mtk_mac *mac[MTK_MAX_DEVS]; + int irq; + u32 msg_enable; + unsigned long sysclk; + struct regmap *ethsys; + struct regmap *pctl; + atomic_t dma_refcnt; + struct mtk_tx_ring tx_ring; + struct mtk_rx_ring rx_ring; + struct napi_struct rx_napi; + struct mtk_tx_dma *scratch_ring; + void *scratch_head; + struct clk *clk_ethif; + struct clk *clk_esw; + struct clk *clk_gp1; + struct clk *clk_gp2; + struct mii_bus *mii_bus; +}; + +/* struct mtk_mac - the structure that holds the info about the MACs of the + * SoC + * @id: The number of the MAC + * @of_node: Our devicetree node + * @hw: Backpointer to our main datastruture + * @hw_stats: Packet statistics counter + * @phy_dev: The attached PHY if available + * @pending_work: The workqueue used to reset the dma ring + */ +struct mtk_mac { + int id; + struct device_node *of_node; + struct mtk_eth *hw; + struct mtk_hw_stats *hw_stats; + struct phy_device *phy_dev; + struct work_struct pending_work; +}; + +/* the struct describing the SoC. these are declared in the soc_xyz.c files */ +extern const struct of_device_id of_mtk_match[]; + +/* read the hardware status register */ +void mtk_stats_update_mac(struct mtk_mac *mac); + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); +u32 mtk_r32(struct mtk_eth *eth, unsigned reg); + +#endif /* MTK_ETH_H */ -- cgit v1.2.3 From 009fb0978ca4b5362cf875869c42893cc43b71a1 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Tue, 8 Mar 2016 11:29:56 +0100 Subject: net-next: mediatek: add Kconfig and Makefile This patch adds the Makefile and Kconfig required to make the driver build. Signed-off-by: John Crispin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/Kconfig | 1 + drivers/net/ethernet/Makefile | 1 + drivers/net/ethernet/mediatek/Kconfig | 17 +++++++++++++++++ drivers/net/ethernet/mediatek/Makefile | 5 +++++ 4 files changed, 24 insertions(+) create mode 100644 drivers/net/ethernet/mediatek/Kconfig create mode 100644 drivers/net/ethernet/mediatek/Makefile (limited to 'drivers') diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 0b13af8e4070..be67a19e01b9 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -106,6 +106,7 @@ config LANTIQ_ETOP Support for the MII0 inside the Lantiq SoC source "drivers/net/ethernet/marvell/Kconfig" +source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 38dc1a776a2b..6ffcc801d37e 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ +obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig new file mode 100644 index 000000000000..698bb89aa901 --- /dev/null +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -0,0 +1,17 @@ +config NET_VENDOR_MEDIATEK + bool "MediaTek ethernet driver" + depends on ARCH_MEDIATEK + ---help--- + If you have a Mediatek SoC with ethernet, say Y. + +if NET_VENDOR_MEDIATEK + +config NET_MEDIATEK_SOC + tristate "MediaTek MT7623 Gigabit ethernet support" + depends on NET_VENDOR_MEDIATEK && (MACH_MT7623 || MACH_MT2701) + select PHYLIB + ---help--- + This driver supports the gigabit ethernet MACs in the + MediaTek MT2701/MT7623 chipset family. + +endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile new file mode 100644 index 000000000000..aa3f1c8ccd4a --- /dev/null +++ b/drivers/net/ethernet/mediatek/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Mediatek SoCs built-in ethernet macs +# + +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth_soc.o -- cgit v1.2.3 From 60ab4584f5bf4e496baec7b090a4b382d03b2100 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 8 Mar 2016 12:42:33 +0200 Subject: net/mlx5_core: Set flow steering dest only for forward rules We need to handle flow table entry destinations only if the action associated with the rule is forwarding (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST). Fixes: 26a8145390b3 ('net/mlx5_core: Introduce flow steering firmware commands') Signed-off-by: Amir Vadai Signed-off-by: Maor Gottlieb Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 29 +++++++++++++---------- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 18 +++++++++----- 2 files changed, 28 insertions(+), 19 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index a9894d2e8e26..f46f1db0fc00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -218,19 +218,22 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, match_value); memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param)); - in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); - list_for_each_entry(dst, &fte->node.children, node.list) { - unsigned int id; - - MLX5_SET(dest_format_struct, in_dests, destination_type, - dst->dest_attr.type); - if (dst->dest_attr.type == - MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) - id = dst->dest_attr.ft->id; - else - id = dst->dest_attr.tir_num; - MLX5_SET(dest_format_struct, in_dests, destination_id, id); - in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { + in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination); + list_for_each_entry(dst, &fte->node.children, node.list) { + unsigned int id; + + MLX5_SET(dest_format_struct, in_dests, destination_type, + dst->dest_attr.type); + if (dst->dest_attr.type == + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) { + id = dst->dest_attr.ft->id; + } else { + id = dst->dest_attr.tir_num; + } + MLX5_SET(dest_format_struct, in_dests, destination_id, id); + in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + } } memset(out, 0, sizeof(out)); err = mlx5_cmd_exec_check_status(dev, in, inlen, out, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 6f68dba8d7ed..f0e67d207d6e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -360,8 +360,8 @@ static void del_rule(struct fs_node *node) memcpy(match_value, fte->val, sizeof(fte->val)); fs_get_obj(ft, fg->node.parent); list_del(&rule->node.list); - fte->dests_size--; - if (fte->dests_size) { + if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && + --fte->dests_size) { err = mlx5_cmd_update_fte(dev, ft, fg->id, fte); if (err) @@ -763,7 +763,8 @@ static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest) return NULL; rule->node.type = FS_TYPE_FLOW_DEST; - memcpy(&rule->dest_attr, dest, sizeof(*dest)); + if (dest) + memcpy(&rule->dest_attr, dest, sizeof(*dest)); return rule; } @@ -785,8 +786,9 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, /* Add dest to dests list- added as first element after the head */ tree_init_node(&rule->node, 1, del_rule); list_add_tail(&rule->node.list, &fte->node.children); - fte->dests_size++; - if (fte->dests_size == 1) + if (dest) + fte->dests_size++; + if (fte->dests_size == 1 || !dest) err = mlx5_cmd_create_fte(get_dev(&ft->node), ft, fg->id, fte); else @@ -802,7 +804,8 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte, free_rule: list_del(&rule->node.list); kfree(rule); - fte->dests_size--; + if (dest) + fte->dests_size--; return ERR_PTR(err); } @@ -996,6 +999,9 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft, struct mlx5_flow_group *g; struct mlx5_flow_rule *rule; + if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest) + return ERR_PTR(-EINVAL); + 
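/*
 * Editor's note - illustrative only: the check added above encodes the
 * invariant this patch establishes - destination entries are written to
 * firmware only when the rule actually forwards, so a forward rule must
 * supply a destination while drop/count rules may pass dest == NULL.
 * As a stand-alone predicate (the flag value is hypothetical and merely
 * stands in for MLX5_FLOW_CONTEXT_ACTION_FWD_DEST):
 */
#define ACTION_FWD_DEST	0x4

static int rule_args_valid(unsigned int action, const void *dest)
{
	if ((action & ACTION_FWD_DEST) && !dest)
		return 0;	/* forwarding without a destination is invalid */
	return 1;		/* drop or count rules need no destination */
}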
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT); fs_for_each_fg(g, ft) if (compare_match_criteria(g->mask.match_criteria_enable, -- cgit v1.2.3 From 67ba422e95edfbd42f1d9782868880b364eea173 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 8 Mar 2016 12:42:34 +0200 Subject: net/mlx5e: Relax ndo_setup_tc handle restriction Restricting handle to TC_H_ROOT breaks the old instantiation of mqprio to setup a hardware qdisc. This patch relaxes the test, to only check the type. Fixes: 08fb1da ("net/mlx5e: Support DCBNL IEEE ETS") Signed-off-by: Amir Vadai Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 19e5daeaa61d..f1ebe065de0d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1883,7 +1883,7 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *tc) { - if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO) + if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; return mlx5e_setup_tc(dev, tc->tc); -- cgit v1.2.3 From b6172aac71e9fba0981e013839ced5fcd97746fb Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 8 Mar 2016 12:42:35 +0200 Subject: net/mlx5e: Add a new priority for kernel flow tables Move the vlan and main flow tables to use priority 1. This will allow the upcoming TC offload logic to use a higher priority (0) for the offload steering table. Signed-off-by: Amir Vadai Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 80d81abc4820..d00a24203410 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1041,7 +1041,7 @@ static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv) int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_MAIN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); @@ -1150,7 +1150,7 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv) int err; ft->num_groups = 0; - ft->t = mlx5_create_flow_table(priv->fts.ns, 0, MLX5E_VLAN_TABLE_SIZE); + ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index f0e67d207d6e..e848d708d2b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -73,8 +73,8 @@ #define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ LEFTOVERS_MAX_FT) -#define KERNEL_MAX_FT 2 -#define KERNEL_NUM_PRIOS 1 +#define KERNEL_MAX_FT 3 +#define KERNEL_NUM_PRIOS 2 #define KENREL_MIN_LEVEL 2 struct node_caps { -- cgit v1.2.3 From e8f887ac6a45ecb7f881f278a3fed1fbf002ef0b Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 8 Mar 2016 12:42:36 +0200 Subject: net/mlx5e: Introduce tc offload support 
Extend ndo_setup_tc() to support ingress tc offloading. Will be used by later patches to offload tc flower filter. Feature is off by default and could be enabled by issuing: # ethtool -K eth0 hw-tc-offload on Offloads flow table is dynamically created when first filter is added. Rules are saved in a hash table that is maintained by the consumer (for example - the flower offload in the next patch). When last filter is removed and no filters exist in the hash table, the offload flow table is destroyed. Signed-off-by: Amir Vadai Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en.h | 9 ++ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 38 ++++++- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 131 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 44 ++++++++ 5 files changed, 222 insertions(+), 2 deletions(-) create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c create mode 100644 drivers/net/ethernet/mellanox/mlx5/core/en_tc.h (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 11b592dbf16a..4fc45ee0c5d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -6,6 +6,6 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \ en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \ - en_txrx.o en_clock.o vxlan.o + en_txrx.o en_clock.o vxlan.o en_tc.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index dbc2fb89e067..0f76d321030f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -43,6 +43,7 @@ #include #include #include +#include #include "wq.h" #include "mlx5_core.h" @@ -527,8 +528,16 @@ struct mlx5e_flow_table { struct mlx5_flow_group **g; }; +struct mlx5e_tc_flow_table { + struct mlx5_flow_table *t; + + struct rhashtable_params ht_params; + struct rhashtable ht; +}; + struct mlx5e_flow_tables { struct mlx5_flow_namespace *ns; + struct mlx5e_tc_flow_table tc; struct mlx5e_flow_table vlan; struct mlx5e_flow_table main; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f1ebe065de0d..e6210485e128 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -30,9 +30,12 @@ * SOFTWARE. 
*/ +#include +#include #include #include #include "en.h" +#include "en_tc.h" #include "eswitch.h" #include "vxlan.h" @@ -1883,6 +1886,17 @@ static int mlx5e_setup_tc(struct net_device *netdev, u8 tc) static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, __be16 proto, struct tc_to_netdev *tc) { + struct mlx5e_priv *priv = netdev_priv(dev); + + if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS)) + goto mqprio; + + switch (tc->type) { + default: + return -EOPNOTSUPP; + } + +mqprio: if (tc->type != TC_SETUP_MQPRIO) return -EINVAL; @@ -1968,6 +1982,13 @@ static int mlx5e_set_features(struct net_device *netdev, mlx5e_disable_vlan_filter(priv); } + if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) && + mlx5e_tc_num_filters(priv)) { + netdev_err(netdev, + "Active offloaded tc filters, can't turn hw_tc_offload off\n"); + return -EINVAL; + } + return err; } @@ -2375,6 +2396,13 @@ static void mlx5e_build_netdev(struct net_device *netdev) if (!priv->params.lro_en) netdev->features &= ~NETIF_F_LRO; +#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f) + if (FT_CAP(flow_modify_en) && + FT_CAP(modify_root) && + FT_CAP(identified_miss_table_mode) && + FT_CAP(flow_table_modify)) + priv->netdev->hw_features |= NETIF_F_HW_TC; + netdev->features |= NETIF_F_HIGHDMA; netdev->priv_flags |= IFF_UNICAST_FLT; @@ -2496,6 +2524,10 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) mlx5e_vxlan_init(priv); + err = mlx5e_tc_init(priv); + if (err) + goto err_destroy_flow_tables; + #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets); #endif @@ -2503,7 +2535,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) err = register_netdev(netdev); if (err) { mlx5_core_err(mdev, "register_netdev failed, %d\n", err); - goto err_destroy_flow_tables; + goto err_tc_cleanup; } if (mlx5e_vxlan_allowed(mdev)) @@ -2514,6 +2546,9 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev) return priv; +err_tc_cleanup: + mlx5e_tc_cleanup(priv); + err_destroy_flow_tables: mlx5e_destroy_flow_tables(priv); @@ -2561,6 +2596,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) mlx5e_disable_async_events(priv); flush_scheduled_work(); unregister_netdev(netdev); + mlx5e_tc_cleanup(priv); mlx5e_vxlan_cleanup(priv); mlx5e_destroy_flow_tables(priv); mlx5e_destroy_tirs(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c new file mode 100644 index 000000000000..1dc715d50244 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#include +#include +#include +#include "en.h" +#include "en_tc.h" + +struct mlx5e_tc_flow { + struct rhash_head node; + u64 cookie; + struct mlx5_flow_rule *rule; +}; + +#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024 +#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4 + +static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv, + u32 *match_c, u32 *match_v, + u32 action, u32 flow_tag) +{ + struct mlx5_flow_destination dest = { + .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE, + {.ft = priv->fts.vlan.t}, + }; + struct mlx5_flow_rule *rule; + bool table_created = false; + + if (IS_ERR_OR_NULL(priv->fts.tc.t)) { + priv->fts.tc.t = + mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0, + MLX5E_TC_FLOW_TABLE_NUM_ENTRIES, + MLX5E_TC_FLOW_TABLE_NUM_GROUPS); + if (IS_ERR(priv->fts.tc.t)) { + netdev_err(priv->netdev, + "Failed to create tc offload table\n"); + return ERR_CAST(priv->fts.tc.t); + } + + table_created = true; + } + + rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS, + match_c, match_v, + action, flow_tag, + action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL); + + if (IS_ERR(rule) && table_created) { + mlx5_destroy_flow_table(priv->fts.tc.t); + priv->fts.tc.t = NULL; + } + + return rule; +} + +static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + struct mlx5_flow_rule *rule) +{ + mlx5_del_flow_rule(rule); + + if (!mlx5e_tc_num_filters(priv)) { + mlx5_destroy_flow_table(priv->fts.tc.t); + priv->fts.tc.t = NULL; + } +} + +static const struct rhashtable_params mlx5e_tc_flow_ht_params = { + .head_offset = offsetof(struct mlx5e_tc_flow, node), + .key_offset = offsetof(struct mlx5e_tc_flow, cookie), + .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie), + .automatic_shrinking = true, +}; + +int mlx5e_tc_init(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + + tc->ht_params = mlx5e_tc_flow_ht_params; + return rhashtable_init(&tc->ht, &tc->ht_params); +} + +static void _mlx5e_tc_del_flow(void *ptr, void *arg) +{ + struct mlx5e_tc_flow *flow = ptr; + struct mlx5e_priv *priv = arg; + + mlx5e_tc_del_flow(priv, flow->rule); + kfree(flow); +} + +void mlx5e_tc_cleanup(struct mlx5e_priv *priv) +{ + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + + rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv); + + if (!IS_ERR_OR_NULL(priv->fts.tc.t)) { + mlx5_destroy_flow_table(priv->fts.tc.t); + priv->fts.tc.t = NULL; + } +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h new file mode 100644 index 000000000000..46eacc582f13 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2016, Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __MLX5_EN_TC_H__ +#define __MLX5_EN_TC_H__ + +int mlx5e_tc_init(struct mlx5e_priv *priv); +void mlx5e_tc_cleanup(struct mlx5e_priv *priv); + +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) +{ + return atomic_read(&priv->fts.tc.ht.nelems); +} + +#endif /* __MLX5_EN_TC_H__ */ -- cgit v1.2.3 From e3a2b7ed018e885721a1b664ed7bf0f3fe29c81c Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 8 Mar 2016 12:42:37 +0200 Subject: net/mlx5e: Support offload cls_flower with drop action Parse tc_cls_flower_offload into device specific commands and program the hardware to classify and act accordingly. For example, to drop ICMP (ip_proto 1) packets from specific smac, dmac, src_ip, src_ip, arriving to interface ens9: # tc qdisc add dev ens9 ingress # tc filter add dev ens9 protocol ip parent ffff: \ flower ip_proto 1 \ dst_mac 7c:fe:90:69:81:62 src_mac 7c:fe:90:69:81:56 \ dst_ip 11.11.11.11 src_ip 11.11.11.12 indev ens9 \ action drop Signed-off-by: Amir Vadai Signed-off-by: Or Gerlitz Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 7 + drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 297 ++++++++++++++++++++++ drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 5 + 3 files changed, 309 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index e6210485e128..ac5807803c84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1892,6 +1892,13 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle, goto mqprio; switch (tc->type) { + case TC_SETUP_CLSFLOWER: + switch (tc->cls_flower->command) { + case TC_CLSFLOWER_REPLACE: + return mlx5e_configure_flower(priv, proto, tc->cls_flower); + case TC_CLSFLOWER_DESTROY: + return mlx5e_delete_flower(priv, tc->cls_flower); + } default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1dc715d50244..3ed4d96da69a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -30,6 +30,9 @@ * SOFTWARE. 
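The TC_CLSFLOWER_DESTROY branch added to mlx5e_ndo_setup_tc() above is reached whenever an offloaded rule is removed, including when the whole ingress qdisc is deleted. A short usage sketch for inspecting and tearing down the rule from the example above (ens9 is again only a placeholder, and the commands are plain iproute2 tc usage, not anything specific to mlx5e):

# tc filter show dev ens9 parent ffff:
# tc qdisc del dev ens9 ingress

Deleting the ingress qdisc removes each flower filter; each removal is expected to reach mlx5e_delete_flower(), and once mlx5e_tc_num_filters() drops to zero, mlx5e_tc_del_flow() also frees the hardware flow table that was created lazily for the first rule.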
*/ +#include +#include +#include #include #include #include @@ -94,6 +97,300 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, } } +static int parse_cls_flower(struct mlx5e_priv *priv, + u32 *match_c, u32 *match_v, + struct tc_cls_flower_offload *f) +{ + void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers); + void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); + u16 addr_type = 0; + u8 ip_proto = 0; + + if (f->dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | + BIT(FLOW_DISSECTOR_KEY_PORTS))) { + netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", + f->dissector->used_keys); + return -EOPNOTSUPP; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_dissector_key_control *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + addr_type = key->addr_type; + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_dissector_key_basic *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->key); + struct flow_dissector_key_basic *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_BASIC, + f->mask); + ip_proto = key->ip_proto; + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, + ntohs(mask->n_proto)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, + ntohs(key->n_proto)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol, + mask->ip_proto); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, + key->ip_proto); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_dissector_key_eth_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->key); + struct flow_dissector_key_eth_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ETH_ADDRS, + f->mask); + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dmac_47_16), + mask->dst); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dmac_47_16), + key->dst); + + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + smac_47_16), + mask->src); + ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + smac_47_16), + key->src); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_dissector_key_ipv4_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->key); + struct flow_dissector_key_ipv4_addrs *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV4_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &mask->src, sizeof(mask->src)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv4_layout.ipv4), + &key->src, sizeof(key->src)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &mask->dst, sizeof(mask->dst)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv4_layout.ipv4), + &key->dst, sizeof(key->dst)); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_dissector_key_ipv6_addrs *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->key); + struct flow_dissector_key_ipv6_addrs *mask = + skb_flow_dissector_target(f->dissector, + 
FLOW_DISSECTOR_KEY_IPV6_ADDRS, + f->mask); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &mask->src, sizeof(mask->src)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + src_ipv4_src_ipv6.ipv6_layout.ipv6), + &key->src, sizeof(key->src)); + + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &mask->dst, sizeof(mask->dst)); + memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, + dst_ipv4_dst_ipv6.ipv6_layout.ipv6), + &key->dst, sizeof(key->dst)); + } + + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_PORTS, + f->mask); + switch (ip_proto) { + case IPPROTO_TCP: + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + tcp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + tcp_sport, ntohs(key->src)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + tcp_dport, ntohs(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + tcp_dport, ntohs(key->dst)); + break; + + case IPPROTO_UDP: + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_sport, ntohs(key->src)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, + udp_dport, ntohs(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, + udp_dport, ntohs(key->dst)); + break; + default: + netdev_err(priv->netdev, + "Only UDP and TCP transport are supported\n"); + return -EINVAL; + } + } + + return 0; +} + +static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, + u32 *action, u32 *flow_tag) +{ + const struct tc_action *a; + + if (tc_no_actions(exts)) + return -EINVAL; + + *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + *action = 0; + + tc_for_each_action(a, exts) { + /* Only support a single action per rule */ + if (*action) + return -EINVAL; + + if (is_tcf_gact_shot(a)) { + *action |= MLX5_FLOW_CONTEXT_ACTION_DROP; + continue; + } + + if (is_tcf_skbedit_mark(a)) { + u32 mark = tcf_skbedit_mark(a); + + if (mark & ~MLX5E_TC_FLOW_ID_MASK) { + netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n", + mark); + return -EINVAL; + } + + *flow_tag = mark; + *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + continue; + } + + return -EINVAL; + } + + return 0; +} + +int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, + struct tc_cls_flower_offload *f) +{ + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + u32 *match_c; + u32 *match_v; + int err = 0; + u32 flow_tag; + u32 action; + struct mlx5e_tc_flow *flow; + struct mlx5_flow_rule *old = NULL; + + flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, + tc->ht_params); + if (flow) + old = flow->rule; + else + flow = kzalloc(sizeof(*flow), GFP_KERNEL); + + match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL); + if (!match_c || !match_v || !flow) { + err = -ENOMEM; + goto err_free; + } + + flow->cookie = f->cookie; + + err = parse_cls_flower(priv, match_c, match_v, f); + if (err < 0) + goto err_free; + + err = parse_tc_actions(priv, f->exts, &action, &flow_tag); + if (err < 0) + goto err_free; + + err = rhashtable_insert_fast(&tc->ht, &flow->node, + tc->ht_params); + if (err) + goto err_free; + + flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, 
action, + flow_tag); + if (IS_ERR(flow->rule)) { + err = PTR_ERR(flow->rule); + goto err_hash_del; + } + + if (old) + mlx5e_tc_del_flow(priv, old); + + goto out; + +err_hash_del: + rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); + +err_free: + if (!old) + kfree(flow); +out: + kfree(match_c); + kfree(match_v); + return err; +} + +int mlx5e_delete_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f) +{ + struct mlx5e_tc_flow *flow; + struct mlx5e_tc_flow_table *tc = &priv->fts.tc; + + flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, + tc->ht_params); + if (!flow) + return -EINVAL; + + rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params); + + mlx5e_tc_del_flow(priv, flow->rule); + + kfree(flow); + + return 0; +} + static const struct rhashtable_params mlx5e_tc_flow_ht_params = { .head_offset = offsetof(struct mlx5e_tc_flow, node), .key_offset = offsetof(struct mlx5e_tc_flow, cookie), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 46eacc582f13..70642f4f78f0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -36,6 +36,11 @@ int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); +int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol, + struct tc_cls_flower_offload *f); +int mlx5e_delete_flower(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f); + static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return atomic_read(&priv->fts.tc.ht.nelems); -- cgit v1.2.3 From 12185a9fafa9cf39b73588c92aa49300ff3bf191 Mon Sep 17 00:00:00 2001 From: Amir Vadai Date: Tue, 8 Mar 2016 12:42:38 +0200 Subject: net/mlx5e: Support offload cls_flower with skbedit mark action Introduce offloading of skbedit mark action. For example, to mark with 0x1234, all TCP (ip_proto 6) packets arriving to interface ens9: # tc qdisc add dev ens9 ingress # tc filter add dev ens9 protocol ip parent ffff: \ flower ip_proto 6 \ indev ens9 \ action skbedit mark 0x1234 Signed-off-by: Amir Vadai Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3 +++ drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1 + drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 2 ++ 3 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 884ed19cded2..58d4e2f962c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -35,6 +35,7 @@ #include #include #include "en.h" +#include "en_tc.h" static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp) { @@ -224,6 +225,8 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, if (cqe_has_vlan(cqe)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(cqe->vlan_info)); + + skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK; } int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3ed4d96da69a..b3de09f13425 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 70642f4f78f0..d677428dc10f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -33,6 +33,8 @@ #ifndef __MLX5_EN_TC_H__ #define __MLX5_EN_TC_H__ +#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff + int mlx5e_tc_init(struct mlx5e_priv *priv); void mlx5e_tc_cleanup(struct mlx5e_priv *priv); -- cgit v1.2.3 From 8e2ad4113ce4671686740f808ff2795395c39eef Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Tue, 8 Mar 2016 15:18:54 -0500 Subject: macvtap: always pass ethernet header in linear The stack expects link layer headers in the skb linear section. Macvtap can create skbs with llheader in frags in edge cases: when (IFF_VNET_HDR is off or vnet_hdr.hdr_len < ETH_HLEN) and prepad + len > PAGE_SIZE and vnet_hdr.flags has no or bad csum. Add checks to ensure linear is always at least ETH_HLEN. At this point, len is already ensured to be >= ETH_HLEN. For backwards compatiblity, rounds up short vnet_hdr.hdr_len. This differs from tap and packet, which return an error. Fixes b9fb9ee07e67 ("macvtap: add GSO/csum offload support") Signed-off-by: Willem de Bruijn Signed-off-by: David S. 
Miller --- drivers/net/macvtap.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index d636d051fac8..95394edd1ed5 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -760,6 +760,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN; if (copylen > good_linear) copylen = good_linear; + else if (copylen < ETH_HLEN) + copylen = ETH_HLEN; linear = copylen; i = *from; iov_iter_advance(&i, copylen); @@ -769,10 +771,11 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, if (!zerocopy) { copylen = len; - if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > good_linear) + linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); + if (linear > good_linear) linear = good_linear; - else - linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); + else if (linear < ETH_HLEN) + linear = ETH_HLEN; } skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, -- cgit v1.2.3 From 869f63a4d28144c03c8f4a4c0d1e8f31f8c11a10 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Mar 2016 12:59:33 -0800 Subject: mlxsw: spectrum: Check requested ageing time is valid Commit c62987bbd8a1 ("bridge: push bridge setting ageing_time down to switchdev") added a check for minimum and maximum ageing time, but this breaks existing behaviour where one can set ageing time to 0 for a non-learning bridge. Push this check down to the driver and allow the check in the bridge layer to be removed. Currently ageing time 0 is refused by the driver, but we can later add support for this functionality. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 2 ++ drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 9 +++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 1b691d7e4a2a..4b8abaf06321 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -123,6 +123,8 @@ struct mlxsw_sp { #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100 unsigned int interval; /* ms */ } fdb_notify; +#define MLXSW_SP_MIN_AGEING_TIME 10 +#define MLXSW_SP_MAX_AGEING_TIME 1000000 #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; struct mlxsw_sp_upper master_bridge; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 7b56098acc58..e1c74efff51a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -311,8 +311,13 @@ static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port, unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; - if (switchdev_trans_ph_prepare(trans)) - return 0; + if (switchdev_trans_ph_prepare(trans)) { + if (ageing_time < MLXSW_SP_MIN_AGEING_TIME || + ageing_time > MLXSW_SP_MAX_AGEING_TIME) + return -ERANGE; + else + return 0; + } return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time); } -- cgit v1.2.3 From 88de1cd457e5cb664d6d437e2ea4750d089165f5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Mar 2016 12:59:34 -0800 Subject: rocker: set FDB cleanup timer according to lowest ageing time In rocker, ageing time is a 
per-port attribute, so the next time the FDB cleanup timer fires should be set according to the lowest ageing time. This will later allow us to delete the BR_MIN_AGEING_TIME macro, which was added to guarantee minimum ageing time in the bridge layer, thereby breaking existing behavior. Signed-off-by: Ido Schimmel Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.h | 1 + drivers/net/ethernet/rocker/rocker_main.c | 2 ++ drivers/net/ethernet/rocker/rocker_ofdpa.c | 5 ++++- 3 files changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 1ab995f7146b..011f1b6f91bb 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -63,6 +63,7 @@ struct rocker { struct { u64 id; } hw; + unsigned long ageing_time; spinlock_t cmd_ring_lock; /* for cmd ring accesses */ struct rocker_dma_ring_info cmd_ring; struct rocker_dma_ring_info event_ring; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 28b775e5a9ad..c8b1bf474e89 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2733,6 +2733,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) rocker->hw.id = rocker_read64(rocker, SWITCH_ID); + rocker->ageing_time = BR_DEFAULT_AGEING_TIME; + err = rocker_probe_ports(rocker); if (err) { dev_err(&pdev->dev, "failed to probe ports\n"); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index 07218c360d86..cfde525d9123 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2105,7 +2105,7 @@ static void ofdpa_fdb_cleanup(unsigned long data) struct ofdpa_port *ofdpa_port; struct ofdpa_fdb_tbl_entry *entry; struct hlist_node *tmp; - unsigned long next_timer = jiffies + BR_MIN_AGEING_TIME; + unsigned long next_timer = jiffies + ofdpa->rocker->ageing_time; unsigned long expires; unsigned long lock_flags; int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE | @@ -2648,9 +2648,12 @@ ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, struct switchdev_trans *trans) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; + struct rocker *rocker = rocker_port->rocker; if (!switchdev_trans_ph_prepare(trans)) { ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time); + if (ofdpa_port->ageing_time < rocker->ageing_time) + rocker->ageing_time = ofdpa_port->ageing_time; mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); } -- cgit v1.2.3 From e327f4e193b6a176f4dc3b19faf6371a8917588d Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 8 Mar 2016 13:54:56 -0800 Subject: cisco: enic: Update logging macros and uses Don't hide varibles used by the logging macros. Miscellanea: o Use the more common ##__VA_ARGS__ extension o Add missing newlines to formats o Realign arguments Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cisco/enic/enic.h | 22 ++++++++------ drivers/net/ethernet/cisco/enic/vnic_cq.c | 2 +- drivers/net/ethernet/cisco/enic/vnic_dev.c | 45 +++++++++++++++-------------- drivers/net/ethernet/cisco/enic/vnic_intr.c | 3 +- drivers/net/ethernet/cisco/enic/vnic_rq.c | 4 +-- drivers/net/ethernet/cisco/enic/vnic_wq.c | 4 +-- 6 files changed, 43 insertions(+), 37 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 7ba6d530b0c0..130f910e4785 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -201,16 +201,20 @@ static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev) } /* wrappers function for kernel log - * Make sure variable vdev of struct vnic_dev is available in the block where - * these macros are used */ -#define vdev_info(args...) dev_info(&vdev->pdev->dev, args) -#define vdev_warn(args...) dev_warn(&vdev->pdev->dev, args) -#define vdev_err(args...) dev_err(&vdev->pdev->dev, args) - -#define vdev_netinfo(args...) netdev_info(vnic_get_netdev(vdev), args) -#define vdev_netwarn(args...) netdev_warn(vnic_get_netdev(vdev), args) -#define vdev_neterr(args...) netdev_err(vnic_get_netdev(vdev), args) +#define vdev_err(vdev, fmt, ...) \ + dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__) +#define vdev_warn(vdev, fmt, ...) \ + dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__) +#define vdev_info(vdev, fmt, ...) \ + dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__) + +#define vdev_neterr(vdev, fmt, ...) \ + netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__) +#define vdev_netwarn(vdev, fmt, ...) \ + netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__) +#define vdev_netinfo(vdev, fmt, ...) \ + netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__) static inline struct device *enic_get_dev(struct enic *enic) { diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.c b/drivers/net/ethernet/cisco/enic/vnic_cq.c index abeda2a9ea27..9c682aff3834 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_cq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_cq.c @@ -43,7 +43,7 @@ int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index); if (!cq->ctrl) { - vdev_err("Failed to hook CQ[%d] resource\n", index); + vdev_err(vdev, "Failed to hook CQ[%d] resource\n", index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 1fdf5fe12a95..8f27df3207bc 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -53,14 +53,14 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, return -EINVAL; if (bar->len < VNIC_MAX_RES_HDR_SIZE) { - vdev_err("vNIC BAR0 res hdr length error\n"); + vdev_err(vdev, "vNIC BAR0 res hdr length error\n"); return -EINVAL; } rh = bar->vaddr; mrh = bar->vaddr; if (!rh) { - vdev_err("vNIC BAR0 res hdr not mem-mapped\n"); + vdev_err(vdev, "vNIC BAR0 res hdr not mem-mapped\n"); return -EINVAL; } @@ -69,7 +69,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, (ioread32(&rh->version) != VNIC_RES_VERSION)) { if ((ioread32(&mrh->magic) != MGMTVNIC_MAGIC) || (ioread32(&mrh->version) != MGMTVNIC_VERSION)) { - vdev_err("vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", + vdev_err(vdev, "vNIC BAR0 res magic/version error exp (%lx/%lx) or (%lx/%lx), curr (%x/%x)\n", VNIC_RES_MAGIC, VNIC_RES_VERSION, 
MGMTVNIC_MAGIC, MGMTVNIC_VERSION, ioread32(&rh->magic), ioread32(&rh->version)); @@ -106,7 +106,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev, /* each count is stride bytes long */ len = count * VNIC_RES_STRIDE; if (len + bar_offset > bar[bar_num].len) { - vdev_err("vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", + vdev_err(vdev, "vNIC BAR0 resource %d out-of-bounds, offset 0x%x + size 0x%x > bar len 0x%lx\n", type, bar_offset, len, bar[bar_num].len); return -EINVAL; @@ -198,7 +198,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring, &ring->base_addr_unaligned); if (!ring->descs_unaligned) { - vdev_err("Failed to allocate ring (size=%d), aborting\n", + vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n", (int)ring->size); return -ENOMEM; } @@ -241,7 +241,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -ENODEV; } if (status & STAT_BUSY) { - vdev_neterr("Busy devcmd %d\n", _CMD_N(cmd)); + vdev_neterr(vdev, "Busy devcmd %d\n", _CMD_N(cmd)); return -EBUSY; } @@ -275,7 +275,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, return -err; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - vdev_neterr("Error %d devcmd %d\n", + vdev_neterr(vdev, "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } @@ -290,7 +290,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, } } - vdev_neterr("Timedout devcmd %d\n", _CMD_N(cmd)); + vdev_neterr(vdev, "Timedout devcmd %d\n", _CMD_N(cmd)); return -ETIMEDOUT; } @@ -313,7 +313,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, new_posted = (posted + 1) % DEVCMD2_RING_SIZE; if (new_posted == fetch_index) { - vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n", + vdev_neterr(vdev, "devcmd2 %d: wq is full. 
fetch index: %u, posted index: %u\n", _CMD_N(cmd), fetch_index, posted); return -EBUSY; } @@ -352,7 +352,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, err = result->error; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - vdev_neterr("Error %d devcmd %d\n", + vdev_neterr(vdev, "Error %d devcmd %d\n", err, _CMD_N(cmd)); return -err; } @@ -365,7 +365,7 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, udelay(100); } - vdev_neterr("devcmd %d timed out\n", _CMD_N(cmd)); + vdev_neterr(vdev, "devcmd %d timed out\n", _CMD_N(cmd)); return -ETIMEDOUT; } @@ -401,7 +401,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ - vdev_err("Fatal error in devcmd2 init - hardware surprise removal"); + vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n"); return -ENODEV; } @@ -474,8 +474,8 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev, err = (int)vdev->args[1]; if (err != ERR_ECMDUNKNOWN || cmd != CMD_CAPABILITY) - vdev_neterr("Error %d proxy devcmd %d\n", err, - _CMD_N(cmd)); + vdev_neterr(vdev, "Error %d proxy devcmd %d\n", + err, _CMD_N(cmd)); return err; } @@ -768,7 +768,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait); if (err) - vdev_neterr("Can't set packet filter\n"); + vdev_neterr(vdev, "Can't set packet filter\n"); return err; } @@ -785,7 +785,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); if (err) - vdev_neterr("Can't add addr [%pM], %d\n", addr, err); + vdev_neterr(vdev, "Can't add addr [%pM], %d\n", addr, err); return err; } @@ -802,7 +802,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr) err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); if (err) - vdev_neterr("Can't del addr [%pM], %d\n", addr, err); + vdev_neterr(vdev, "Can't del addr [%pM], %d\n", addr, err); return err; } @@ -846,7 +846,8 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) dma_addr_t notify_pa; if (vdev->notify || vdev->notify_pa) { - vdev_neterr("notify block %p still allocated", vdev->notify); + vdev_neterr(vdev, "notify block %p still allocated\n", + vdev->notify); return -EINVAL; } @@ -965,7 +966,7 @@ int vnic_dev_intr_coal_timer_info(struct vnic_dev *vdev) */ if ((err == ERR_ECMDUNKNOWN) || (!err && !(vdev->args[0] && vdev->args[1] && vdev->args[2]))) { - vdev_netwarn("Using default conversion factor for interrupt coalesce timer\n"); + vdev_netwarn(vdev, "Using default conversion factor for interrupt coalesce timer\n"); vnic_dev_intr_coal_timer_info_default(vdev); return 0; } @@ -1103,16 +1104,16 @@ int vnic_devcmd_init(struct vnic_dev *vdev) if (res) { err = vnic_dev_init_devcmd2(vdev); if (err) - vdev_warn("DEVCMD2 init failed: %d, Using DEVCMD1", + vdev_warn(vdev, "DEVCMD2 init failed: %d, Using DEVCMD1\n", err); else return 0; } else { - vdev_warn("DEVCMD2 resource not found (old firmware?) Using DEVCMD1\n"); + vdev_warn(vdev, "DEVCMD2 resource not found (old firmware?) 
Using DEVCMD1\n"); } err = vnic_dev_init_devcmd1(vdev); if (err) - vdev_err("DEVCMD1 initialization failed: %d", err); + vdev_err(vdev, "DEVCMD1 initialization failed: %d\n", err); return err; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_intr.c b/drivers/net/ethernet/cisco/enic/vnic_intr.c index 942759d9cb3c..23604e3d4455 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_intr.c +++ b/drivers/net/ethernet/cisco/enic/vnic_intr.c @@ -40,7 +40,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr, intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index); if (!intr->ctrl) { - vdev_err("Failed to hook INTR[%d].ctrl resource\n", index); + vdev_err(vdev, "Failed to hook INTR[%d].ctrl resource\n", + index); return -EINVAL; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c index cce2777dfc41..e572a527b18d 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c @@ -92,7 +92,7 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index, rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index); if (!rq->ctrl) { - vdev_err("Failed to hook RQ[%d] resource\n", index); + vdev_err(vdev, "Failed to hook RQ[%d] resource\n", index); return -EINVAL; } @@ -179,7 +179,7 @@ int vnic_rq_disable(struct vnic_rq *rq) udelay(10); } - vdev_neterr("Failed to disable RQ[%d]\n", rq->index); + vdev_neterr(vdev, "Failed to disable RQ[%d]\n", rq->index); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c index 05ad16a7e872..090cc65658a3 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_wq.c +++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c @@ -95,7 +95,7 @@ int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); if (!wq->ctrl) { - vdev_err("Failed to hook WQ[%d] resource\n", index); + vdev_err(vdev, "Failed to hook WQ[%d] resource\n", index); return -EINVAL; } @@ -187,7 +187,7 @@ int vnic_wq_disable(struct vnic_wq *wq) udelay(10); } - vdev_neterr("Failed to disable WQ[%d]\n", wq->index); + vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index); return -ETIMEDOUT; } -- cgit v1.2.3 From 134611446dc657e1bbc73ca0e4e6b599df687db0 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 9 Mar 2016 03:00:02 +0100 Subject: ip_tunnel: add support for setting flow label via collect metadata This patch extends udp_tunnel6_xmit_skb() to pass in the IPv6 flow label from call sites. Currently, there's no such option and it's always set to zero when writing ip6_flow_hdr(). Add a label member to ip_tunnel_key, so that flow-based tunnels via collect metadata frontends can make use of it. vxlan and geneve will be converted to add flow label support separately. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- drivers/net/geneve.c | 2 +- drivers/net/vxlan.c | 2 +- include/net/dst_metadata.h | 5 ++++- include/net/ip_tunnels.h | 4 +++- include/net/udp_tunnel.h | 4 ++-- net/ipv6/ip6_udp_tunnel.c | 6 +++--- net/tipc/udp_media.c | 2 +- 7 files changed, 15 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 6a0cbbe03e5d..89ccff79d76c 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1054,7 +1054,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, ttl = ttl ? 
: ip6_dst_hoplimit(dst); } udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, - &fl6.saddr, &fl6.daddr, prio, ttl, + &fl6.saddr, &fl6.daddr, prio, ttl, 0, sport, geneve->dst_port, !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX)); return NETDEV_TX_OK; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2399099e68cf..8bdcd5ea8424 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2066,7 +2066,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } udp_tunnel6_xmit_skb(ndst, sk, skb, dev, - &saddr, &dst->sin6.sin6_addr, tos, ttl, + &saddr, &dst->sin6.sin6_addr, tos, ttl, 0, src_port, dst_port, !udp_sum); #endif } diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 84b833af6882..5db9f5910428 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -126,7 +126,7 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb, ip_tunnel_key_init(&tun_dst->u.tun_info.key, iph->saddr, iph->daddr, iph->tos, iph->ttl, - 0, 0, tunnel_id, flags); + 0, 0, 0, tunnel_id, flags); return tun_dst; } @@ -152,8 +152,11 @@ static inline struct metadata_dst *ipv6_tun_rx_dst(struct sk_buff *skb, info->key.u.ipv6.src = ip6h->saddr; info->key.u.ipv6.dst = ip6h->daddr; + info->key.tos = ipv6_get_dsfield(ip6h); info->key.ttl = ip6h->hop_limit; + info->key.label = ip6_flowlabel(ip6h); + return tun_dst; } diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 0acd80fadb32..5dc2e454f866 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -48,6 +48,7 @@ struct ip_tunnel_key { __be16 tun_flags; u8 tos; /* TOS for IPv4, TC for IPv6 */ u8 ttl; /* TTL for IPv4, HL for IPv6 */ + __be32 label; /* Flow Label for IPv6 */ __be16 tp_src; __be16 tp_dst; }; @@ -181,7 +182,7 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, __be32 saddr, __be32 daddr, - u8 tos, u8 ttl, + u8 tos, u8 ttl, __be32 label, __be16 tp_src, __be16 tp_dst, __be64 tun_id, __be16 tun_flags) { @@ -192,6 +193,7 @@ static inline void ip_tunnel_key_init(struct ip_tunnel_key *key, 0, IP_TUNNEL_KEY_IPV4_PAD_LEN); key->tos = tos; key->ttl = ttl; + key->label = label; key->tun_flags = tun_flags; /* For the tunnel types on the top of IPsec, the tp_src and tp_dst of diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h index 97f5adb121a6..b83114077cee 100644 --- a/include/net/udp_tunnel.h +++ b/include/net/udp_tunnel.h @@ -88,8 +88,8 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, - __u8 prio, __u8 ttl, __be16 src_port, - __be16 dst_port, bool nocheck); + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck); #endif void udp_tunnel_sock_release(struct socket *sock); diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index 14dacf1df529..a7520528ecd2 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c @@ -73,8 +73,8 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, struct net_device *dev, struct in6_addr *saddr, struct in6_addr *daddr, - __u8 prio, __u8 ttl, __be16 src_port, - __be16 dst_port, bool nocheck) + __u8 prio, __u8 ttl, __be32 label, + __be16 src_port, __be16 dst_port, bool nocheck) { struct udphdr *uh; struct ipv6hdr *ip6h; @@ -98,7 +98,7 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, __skb_push(skb, 
sizeof(*ip6h)); skb_reset_network_header(skb); ip6h = ipv6_hdr(skb); - ip6_flow_hdr(ip6h, prio, htonl(0)); + ip6_flow_hdr(ip6h, prio, label); ip6h->payload_len = htons(skb->len); ip6h->nexthdr = IPPROTO_UDP; ip6h->hop_limit = ttl; diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index 49b3c2ede7ab..c94f9a15e2cd 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -196,7 +196,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb, ttl = ip6_dst_hoplimit(ndst); err = udp_tunnel6_xmit_skb(ndst, ub->ubsock->sk, skb, ndst->dev, &src->ipv6, - &dst->ipv6, 0, ttl, src->udp_port, + &dst->ipv6, 0, ttl, 0, src->udp_port, dst->udp_port, false); #endif } -- cgit v1.2.3 From e7f70af111f086a20800ad2e17f544b2e3e0f375 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 9 Mar 2016 03:00:03 +0100 Subject: vxlan: support setting IPv6 flow label This work adds support for setting the IPv6 flow label for vxlan per device and through collect metadata (ip_tunnel_key) frontends. The vxlan dst cache does not need any special considerations here, for the cases where caches can be used, the label is static per cache. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- drivers/net/vxlan.c | 26 +++++++++++++++++++++----- include/net/vxlan.h | 1 + include/uapi/linux/if_link.h | 1 + 3 files changed, 23 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8bdcd5ea8424..8eda76f9e474 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -1790,6 +1790,7 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, #if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, struct sk_buff *skb, int oif, u8 tos, + __be32 label, const struct in6_addr *daddr, struct in6_addr *saddr, struct dst_cache *dst_cache, @@ -1813,6 +1814,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan, fl6.flowi6_tos = RT_TOS(tos); fl6.daddr = *daddr; fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr; + fl6.flowlabel = label; fl6.flowi6_mark = skb->mark; fl6.flowi6_proto = IPPROTO_UDP; @@ -1888,7 +1890,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct vxlan_metadata _md; struct vxlan_metadata *md = &_md; __be16 src_port = 0, dst_port; - __be32 vni; + __be32 vni, label; __be16 df = 0; __u8 tos, ttl; int err; @@ -1939,12 +1941,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, if (tos == 1) tos = ip_tunnel_get_dsfield(old_iph, skb); + label = vxlan->cfg.label; src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min, vxlan->cfg.port_max, true); if (info) { ttl = info->key.ttl; tos = info->key.tos; + label = info->key.label; udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM); if (info->options_len) @@ -2020,7 +2024,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, ndst = vxlan6_get_route(vxlan, skb, rdst ? 
rdst->remote_ifindex : 0, tos, - &dst->sin6.sin6_addr, &saddr, + label, &dst->sin6.sin6_addr, &saddr, dst_cache, info); if (IS_ERR(ndst)) { netdev_dbg(dev, "no route to %pI6\n", @@ -2066,8 +2070,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } udp_tunnel6_xmit_skb(ndst, sk, skb, dev, - &saddr, &dst->sin6.sin6_addr, tos, ttl, 0, - src_port, dst_port, !udp_sum); + &saddr, &dst->sin6.sin6_addr, tos, ttl, + label, src_port, dst_port, !udp_sum); #endif } @@ -2390,7 +2394,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) if (!vxlan->vn6_sock) return -EINVAL; ndst = vxlan6_get_route(vxlan, skb, 0, info->key.tos, - &info->key.u.ipv6.dst, + info->key.label, &info->key.u.ipv6.dst, &info->key.u.ipv6.src, NULL, info); if (IS_ERR(ndst)) return PTR_ERR(ndst); @@ -2505,6 +2509,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, [IFLA_VXLAN_TTL] = { .type = NLA_U8 }, + [IFLA_VXLAN_LABEL] = { .type = NLA_U32 }, [IFLA_VXLAN_LEARNING] = { .type = NLA_U8 }, [IFLA_VXLAN_AGEING] = { .type = NLA_U32 }, [IFLA_VXLAN_LIMIT] = { .type = NLA_U32 }, @@ -2739,6 +2744,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, vxlan->flags |= VXLAN_F_IPV6; } + if (conf->label && !use_ipv6) { + pr_info("label only supported in use with IPv6\n"); + return -EINVAL; + } + if (conf->remote_ifindex) { lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); dst->remote_ifindex = conf->remote_ifindex; @@ -2887,6 +2897,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev, if (data[IFLA_VXLAN_TTL]) conf.ttl = nla_get_u8(data[IFLA_VXLAN_TTL]); + if (data[IFLA_VXLAN_LABEL]) + conf.label = nla_get_be32(data[IFLA_VXLAN_LABEL]) & + IPV6_FLOWLABEL_MASK; + if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING])) conf.flags |= VXLAN_F_LEARN; @@ -2990,6 +3004,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ + nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_RSC */ @@ -3053,6 +3068,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->cfg.ttl) || nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || + nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->flags & VXLAN_F_LEARN)) || nla_put_u8(skb, IFLA_VXLAN_PROXY, diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 6eda4ed4d78b..a763c96ecde4 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -162,6 +162,7 @@ struct vxlan_config { u16 port_max; u8 tos; u8 ttl; + __be32 label; u32 flags; unsigned long age_interval; unsigned int addrmax; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index d452cea59020..6bebc975031d 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -456,6 +456,7 @@ enum { IFLA_VXLAN_GBP, IFLA_VXLAN_REMCSUM_NOPARTIAL, IFLA_VXLAN_COLLECT_METADATA, + IFLA_VXLAN_LABEL, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) -- cgit v1.2.3 From 8eb3b99554b82da968d1fbc00df9f3156c5e2d63 
Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 9 Mar 2016 03:00:04 +0100 Subject: geneve: support setting IPv6 flow label This work adds support for setting the IPv6 flow label for geneve per device and through collect metadata (ip_tunnel_key) frontends. Also here, the geneve dst cache does not need any special considerations, for the cases where caches can be used, the label is static per cache. Signed-off-by: Daniel Borkmann Signed-off-by: David S. Miller --- drivers/net/geneve.c | 35 +++++++++++++++++++++++++++-------- include/uapi/linux/if_link.h | 1 + 2 files changed, 28 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 89ccff79d76c..33185b9a435e 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -68,6 +68,7 @@ struct geneve_dev { u8 tos; /* TOS override */ union geneve_addr remote; /* IP address for link partner */ struct list_head next; /* geneve's per namespace list */ + __be32 label; /* IPv6 flowlabel override */ __be16 dst_port; bool collect_md; struct gro_cells gro_cells; @@ -846,6 +847,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; fl6->flowi6_tos = RT_TOS(info->key.tos); + fl6->flowlabel = info->key.label; dst_cache = &info->dst_cache; } else { prio = geneve->tos; @@ -857,6 +859,7 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, } fl6->flowi6_tos = RT_TOS(prio); + fl6->flowlabel = geneve->label; fl6->daddr = geneve->remote.sin6.sin6_addr; dst_cache = &geneve->dst_cache; } @@ -998,6 +1001,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct flowi6 fl6; __u8 prio, ttl; __be16 sport; + __be32 label; bool xnet = !net_eq(geneve->net, dev_net(geneve->dev)); u32 flags = geneve->flags; @@ -1041,6 +1045,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, prio = ip_tunnel_ecn_encap(key->tos, iip, skb); ttl = key->ttl; + label = info->key.label; } else { err = geneve6_build_skb(dst, skb, 0, geneve->vni, 0, NULL, flags, xnet); @@ -1052,9 +1057,11 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (!ttl && ipv6_addr_is_multicast(&fl6.daddr)) ttl = 1; ttl = ttl ? 
: ip6_dst_hoplimit(dst); + label = geneve->label; } + udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev, - &fl6.saddr, &fl6.daddr, prio, ttl, 0, + &fl6.saddr, &fl6.daddr, prio, ttl, label, sport, geneve->dst_port, !!(flags & GENEVE_F_UDP_ZERO_CSUM6_TX)); return NETDEV_TX_OK; @@ -1238,6 +1245,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) }, [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, [IFLA_GENEVE_TOS] = { .type = NLA_U8 }, + [IFLA_GENEVE_LABEL] = { .type = NLA_U32 }, [IFLA_GENEVE_PORT] = { .type = NLA_U16 }, [IFLA_GENEVE_COLLECT_METADATA] = { .type = NLA_FLAG }, [IFLA_GENEVE_UDP_CSUM] = { .type = NLA_U8 }, @@ -1295,8 +1303,8 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn, static int geneve_configure(struct net *net, struct net_device *dev, union geneve_addr *remote, - __u32 vni, __u8 ttl, __u8 tos, __be16 dst_port, - bool metadata, u32 flags) + __u32 vni, __u8 ttl, __u8 tos, __be32 label, + __be16 dst_port, bool metadata, u32 flags) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); @@ -1306,7 +1314,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, if (!remote) return -EINVAL; if (metadata && - (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl)) + (remote->sa.sa_family != AF_UNSPEC || vni || tos || ttl || label)) return -EINVAL; geneve->net = net; @@ -1321,10 +1329,14 @@ static int geneve_configure(struct net *net, struct net_device *dev, (remote->sa.sa_family == AF_INET6 && ipv6_addr_is_multicast(&remote->sin6.sin6_addr))) return -EINVAL; + if (label && remote->sa.sa_family != AF_INET6) + return -EINVAL; + geneve->remote = *remote; geneve->ttl = ttl; geneve->tos = tos; + geneve->label = label; geneve->dst_port = dst_port; geneve->collect_md = metadata; geneve->flags = flags; @@ -1367,6 +1379,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, __u8 ttl = 0, tos = 0; bool metadata = false; union geneve_addr remote = geneve_remote_unspec; + __be32 label = 0; __u32 vni = 0; u32 flags = 0; @@ -1403,6 +1416,10 @@ static int geneve_newlink(struct net *net, struct net_device *dev, if (data[IFLA_GENEVE_TOS]) tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + if (data[IFLA_GENEVE_LABEL]) + label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & + IPV6_FLOWLABEL_MASK; + if (data[IFLA_GENEVE_PORT]) dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]); @@ -1421,8 +1438,8 @@ static int geneve_newlink(struct net *net, struct net_device *dev, nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) flags |= GENEVE_F_UDP_ZERO_CSUM6_RX; - return geneve_configure(net, dev, &remote, vni, ttl, tos, dst_port, - metadata, flags); + return geneve_configure(net, dev, &remote, vni, ttl, tos, label, + dst_port, metadata, flags); } static void geneve_dellink(struct net_device *dev, struct list_head *head) @@ -1439,6 +1456,7 @@ static size_t geneve_get_size(const struct net_device *dev) nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ + nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_UDP_CSUM */ @@ -1469,7 +1487,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) } if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) || - 
nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos)) + nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos) || + nla_put_be32(skb, IFLA_GENEVE_LABEL, geneve->label)) goto nla_put_failure; if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port)) @@ -1521,7 +1540,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, return dev; err = geneve_configure(net, dev, &geneve_remote_unspec, - 0, 0, 0, htons(dst_port), true, + 0, 0, 0, 0, htons(dst_port), true, GENEVE_F_UDP_ZERO_CSUM6_RX); if (err) goto err; diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6bebc975031d..249eef9a21bd 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -479,6 +479,7 @@ enum { IFLA_GENEVE_UDP_CSUM, IFLA_GENEVE_UDP_ZERO_CSUM6_TX, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, + IFLA_GENEVE_LABEL, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) -- cgit v1.2.3 From fc916ff2027760900f3b373c388a2098253f3c2a Mon Sep 17 00:00:00 2001 From: Sudarsana Reddy Kalluru Date: Wed, 9 Mar 2016 09:16:23 +0200 Subject: qed: Prevent MF link notifications When device is configured for Multi-function mode, some older management firmware might incorrectly notify interfaces of link changes while they haven't requested the physical link configuration to be set. This can create bizzare race conditions where unloading interfaces are getting notified that the link is up. Let the driver compensate - store the logical requested state of the link and don't propagate notifications after protocol driver explicitly requires the link to be unset. Signed-off-by: Sudarsana Reddy Kalluru Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed.h | 3 +++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index e5604eec81bf..fcb8e9ba51d9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -303,6 +303,9 @@ struct qed_hwfn { bool b_int_enabled; bool b_int_requested; + /* True if the driver requests for the link */ + bool b_drv_link_init; + struct qed_mcp_info *mcp_info; struct qed_hw_cid_data *p_tx_cids; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index f23ce734ab63..2bf98248c29e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -390,7 +390,10 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, return; } - p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + if (p_hwfn->b_drv_link_init) + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + else + p_link->link_up = false; p_link->full_duplex = true; switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { @@ -520,6 +523,8 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, offsetof(struct public_drv_mb, union_data) + i, ((u32 *)&phy_cfg)[i >> 2]); + p_hwfn->b_drv_link_init = b_up; + if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", -- cgit v1.2.3 From 5529bad98f10f742e3ab490733bed8e108508759 Mon Sep 17 00:00:00 2001 From: Tomer Tayar Date: Wed, 9 Mar 2016 09:16:24 +0200 Subject: qed: Major changes to MB locking Driver interaction with the managemnt firmware is done via mailbox commands which the management firmware periodically sample, as well as placing of additional 
data in set places in the shared memory. Each PF has a single designated mailbox address, and all flows that require messaging to the management should use it. This patch does 2 things: 1. It re-defines the critical section surrounding the mailbox sending - that section should include the setting of the shared memory as well as the sending of the command [otherwise a race might send a command with the data of a different command]. 2. It moves the locking scheme from using mutices into using spinlocks. This lays the groundwork for sending MFW commands from non-sleepable contexts. Signed-off-by: Tomer Tayar Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 250 ++++++++++++++++++------------ drivers/net/ethernet/qlogic/qed/qed_mcp.h | 14 +- 2 files changed, 167 insertions(+), 97 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 2bf98248c29e..6937c88fef4d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -11,8 +11,8 @@ #include #include #include -#include #include +#include #include #include "qed.h" #include "qed_hsi.h" @@ -168,8 +168,8 @@ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) goto err; - /* Initialize the MFW mutex */ - mutex_init(&p_info->mutex); + /* Initialize the MFW spinlock */ + spin_lock_init(&p_info->lock); return 0; @@ -179,6 +179,52 @@ err: return -ENOMEM; } +/* Locks the MFW mailbox of a PF to ensure a single access. + * The lock is achieved in most cases by holding a spinlock, causing other + * threads to wait till a previous access is done. + * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single + * access is achieved by setting a blocking flag, which will fail other + * competing contexts to send their mailboxes. + */ +static int qed_mcp_mb_lock(struct qed_hwfn *p_hwfn, + u32 cmd) +{ + spin_lock_bh(&p_hwfn->mcp_info->lock); + + /* The spinlock shouldn't be acquired when the mailbox command is + * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel + * pending [UN]LOAD_REQ command of another PF together with a spinlock + * (i.e. interrupts are disabled) - can lead to a deadlock. + * It is assumed that for a single PF, no other mailbox commands can be + * sent from another context while sending LOAD_REQ, and that any + * parallel commands to UNLOAD_REQ can be cancelled. + */ + if (cmd == DRV_MSG_CODE_LOAD_DONE || cmd == DRV_MSG_CODE_UNLOAD_DONE) + p_hwfn->mcp_info->block_mb_sending = false; + + if (p_hwfn->mcp_info->block_mb_sending) { + DP_NOTICE(p_hwfn, + "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n", + cmd); + spin_unlock_bh(&p_hwfn->mcp_info->lock); + return -EBUSY; + } + + if (cmd == DRV_MSG_CODE_LOAD_REQ || cmd == DRV_MSG_CODE_UNLOAD_REQ) { + p_hwfn->mcp_info->block_mb_sending = true; + spin_unlock_bh(&p_hwfn->mcp_info->lock); + } + + return 0; +} + +static void qed_mcp_mb_unlock(struct qed_hwfn *p_hwfn, + u32 cmd) +{ + if (cmd != DRV_MSG_CODE_LOAD_REQ && cmd != DRV_MSG_CODE_UNLOAD_REQ) + spin_unlock_bh(&p_hwfn->mcp_info->lock); +} + int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -187,6 +233,13 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, u32 org_mcp_reset_seq, cnt = 0; int rc = 0; + /* Ensure that only a single thread is accessing the mailbox at a + * certain time. 
+ */ + rc = qed_mcp_mb_lock(p_hwfn, DRV_MSG_CODE_MCP_RESET); + if (rc != 0) + return rc; + /* Set drv command along with the updated sequence */ org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, @@ -209,6 +262,8 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn, rc = -EAGAIN; } + qed_mcp_mb_unlock(p_hwfn, DRV_MSG_CODE_MCP_RESET); + return rc; } @@ -275,14 +330,12 @@ static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn, return rc; } -int qed_mcp_cmd(struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt, - u32 cmd, - u32 param, - u32 *o_mcp_resp, - u32 *o_mcp_param) +static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + struct qed_mcp_mb_params *p_mb_params) { - int rc = 0; + u32 union_data_addr; + int rc; /* MCP not initialized */ if (!qed_mcp_is_init(p_hwfn)) { @@ -290,28 +343,56 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, return -EBUSY; } - /* Lock Mutex to ensure only single thread is - * accessing the MCP at one time + union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + offsetof(struct public_drv_mb, union_data); + + /* Ensure that only a single thread is accessing the mailbox at a + * certain time. */ - mutex_lock(&p_hwfn->mcp_info->mutex); - rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, - o_mcp_resp, o_mcp_param); - /* Release Mutex */ - mutex_unlock(&p_hwfn->mcp_info->mutex); + rc = qed_mcp_mb_lock(p_hwfn, p_mb_params->cmd); + if (rc) + return rc; + + if (p_mb_params->p_data_src != NULL) + qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, + p_mb_params->p_data_src, + sizeof(*p_mb_params->p_data_src)); + + rc = qed_do_mcp_cmd(p_hwfn, p_ptt, p_mb_params->cmd, + p_mb_params->param, &p_mb_params->mcp_resp, + &p_mb_params->mcp_param); + + if (p_mb_params->p_data_dst != NULL) + qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst, + union_data_addr, + sizeof(*p_mb_params->p_data_dst)); + + qed_mcp_mb_unlock(p_hwfn, p_mb_params->cmd); return rc; } -static void qed_mcp_set_drv_ver(struct qed_dev *cdev, - struct qed_hwfn *p_hwfn, - struct qed_ptt *p_ptt) +int qed_mcp_cmd(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) { - u32 i; + struct qed_mcp_mb_params mb_params; + int rc; + + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.param = param; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) + return rc; + + *o_mcp_resp = mb_params.mcp_resp; + *o_mcp_param = mb_params.mcp_param; - /* Copy version string to MCP */ - for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++) - DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i], - *(u32 *)&cdev->ver_str[i * sizeof(u32)]); + return 0; } int qed_mcp_load_req(struct qed_hwfn *p_hwfn, @@ -319,26 +400,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, u32 *p_load_code) { struct qed_dev *cdev = p_hwfn->cdev; - u32 param; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; int rc; - if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); - return -EBUSY; - } - - /* Save driver's version to shmem */ - qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt); - - DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", - p_hwfn->mcp_info->drv_mb_seq, - p_hwfn->mcp_info->drv_pulse_seq); - + memset(&mb_params, 0, sizeof(mb_params)); /* Load Request */ - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ, - (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | - cdev->drv_type), - p_load_code, ¶m); + mb_params.cmd = DRV_MSG_CODE_LOAD_REQ; + mb_params.param = 
PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | + cdev->drv_type; + memcpy(&union_data.ver_str, cdev->ver_str, MCP_DRV_VER_STR_SIZE); + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ if (rc) { @@ -346,6 +419,8 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return rc; } + *p_load_code = mb_params.mcp_resp; + /* If MFW refused (e.g. other port is in diagnostic mode) we * must abort. This can happen in the following cases: * - Other port is in diagnostic mode @@ -495,55 +570,43 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, bool b_up) { struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input; - u32 param = 0, reply = 0, cmd; - struct pmm_phy_cfg phy_cfg; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + struct pmm_phy_cfg *phy_cfg; int rc = 0; - u32 i; - - if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); - return -EBUSY; - } + u32 cmd; /* Set the shmem configuration according to params */ - memset(&phy_cfg, 0, sizeof(phy_cfg)); + phy_cfg = &union_data.drv_phy_cfg; + memset(phy_cfg, 0, sizeof(*phy_cfg)); cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; if (!params->speed.autoneg) - phy_cfg.speed = params->speed.forced_speed; - phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; - phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; - phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0; - phy_cfg.adv_speed = params->speed.advertised_speeds; - phy_cfg.loopback_mode = params->loopback_mode; - - /* Write the requested configuration to shmem */ - for (i = 0; i < sizeof(phy_cfg); i += 4) - qed_wr(p_hwfn, p_ptt, - p_hwfn->mcp_info->drv_mb_addr + - offsetof(struct public_drv_mb, union_data) + i, - ((u32 *)&phy_cfg)[i >> 2]); + phy_cfg->speed = params->speed.forced_speed; + phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; + phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; + phy_cfg->pause |= (params->pause.forced_tx) ? 
PMM_PAUSE_TX : 0; + phy_cfg->adv_speed = params->speed.advertised_speeds; + phy_cfg->loopback_mode = params->loopback_mode; p_hwfn->b_drv_link_init = b_up; if (b_up) { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n", - phy_cfg.speed, - phy_cfg.pause, - phy_cfg.adv_speed, - phy_cfg.loopback_mode, - phy_cfg.feature_config_flags); + phy_cfg->speed, + phy_cfg->pause, + phy_cfg->adv_speed, + phy_cfg->loopback_mode, + phy_cfg->feature_config_flags); } else { DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n"); } - DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n", - p_hwfn->mcp_info->drv_mb_seq, - p_hwfn->mcp_info->drv_pulse_seq); - - /* Load Request */ - rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, ¶m); + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = cmd; + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); /* if mcp fails to respond we must abort */ if (rc) { @@ -836,31 +899,28 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_mcp_drv_version *p_ver) { - int rc = 0; - u32 param = 0, reply = 0, i; - - if (!qed_mcp_is_init(p_hwfn)) { - DP_NOTICE(p_hwfn, "MFW is not initialized !\n"); - return -EBUSY; - } + struct drv_version_stc *p_drv_version; + struct qed_mcp_mb_params mb_params; + union drv_union_data union_data; + __be32 val; + u32 i; + int rc; - DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version, - p_ver->version); - /* Copy version string to shmem */ - for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) { - DRV_MB_WR(p_hwfn, p_ptt, - union_data.drv_version.name[i * sizeof(u32)], - *(u32 *)&p_ver->name[i * sizeof(u32)]); + p_drv_version = &union_data.drv_version; + p_drv_version->version = p_ver->version; + for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) { + val = cpu_to_be32(p_ver->name[i]); + *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val; } - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply, - ¶m); - if (rc) { + memset(&mb_params, 0, sizeof(mb_params)); + mb_params.cmd = DRV_MSG_CODE_SET_VERSION; + mb_params.p_data_src = &union_data; + rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params); + if (rc) DP_ERR(p_hwfn, "MCP response failure, aborting\n"); - return rc; - } - return 0; + return rc; } int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 506197d5c3dd..50917a2131a5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -11,8 +11,8 @@ #include #include -#include #include +#include #include "qed_hsi.h" struct qed_mcp_link_speed_params { @@ -255,7 +255,8 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, #define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \ ((_p_hwfn)->cdev->num_ports_in_engines * 2)) struct qed_mcp_info { - struct mutex mutex; /* MCP access lock */ + spinlock_t lock; + bool block_mb_sending; u32 public_base; u32 drv_mb_addr; u32 mfw_mb_addr; @@ -272,6 +273,15 @@ struct qed_mcp_info { u16 mcp_hist; }; +struct qed_mcp_mb_params { + u32 cmd; + u32 param; + union drv_union_data *p_data_src; + union drv_union_data *p_data_dst; + u32 mcp_resp; + u32 mcp_param; +}; + /** * @brief Initialize the interface with the MCP * -- cgit v1.2.3 From 334c03b59ba28aa76384474e705a8a0d47f0bb39 Mon Sep 17 00:00:00 2001 From: Zvi Nachmani Date: Wed, 9 Mar 2016 09:16:25 +0200 Subject: qed: Notify 
of transceiver changes Handle a new message from the MFW, one that indicates that the transceiver state has changed, and log that into the system logs. Signed-off-by: Zvi Nachmani Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 11 +++++++++++ drivers/net/ethernet/qlogic/qed/qed_mcp.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 236db8a99ec3..a368f5e71d95 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3621,6 +3621,9 @@ struct public_port { u32 fc_npiv_nvram_tbl_addr; u32 fc_npiv_nvram_tbl_size; u32 transceiver_data; +#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF +#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001 }; /**************************************/ @@ -3955,6 +3958,14 @@ enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, MFW_DRV_MSG_MAX }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 6937c88fef4d..39c831d6ead3 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -440,6 +440,33 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, return 0; } +static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 transceiver_state; + + transceiver_state = qed_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data)); + + DP_VERBOSE(p_hwfn, + (NETIF_MSG_HW | QED_MSG_SP), + "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n", + transceiver_state, + (u32)(p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, + transceiver_data))); + + transceiver_state = GET_FIELD(transceiver_state, + PMM_TRANSCEIVER_STATE); + + if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) + DP_NOTICE(p_hwfn, "Transceiver is present.\n"); + else + DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n"); +} + static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_reset) @@ -649,6 +676,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, case MFW_DRV_MSG_LINK_CHANGE: qed_mcp_handle_link_change(p_hwfn, p_ptt, false); break; + case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: + qed_mcp_handle_transceiver_change(p_hwfn, p_ptt); + break; default: DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i); rc = -EINVAL; -- cgit v1.2.3 From 8f60bafec368191594a8a6ed5f0ce14968131754 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Wed, 9 Mar 2016 09:16:26 +0200 Subject: qed: Enlarge the drain timeout In the scenario where slowpath configuration isn't passing due to various pause configurations affecting the chip, the theoretical time required in the worst-case scenario to empty hw fifos sufficiently to guarantee that slowpath configuration would flow is currently insufficient. This increases such a drain request to the theoretical maximum. Signed-off-by: Yuval Mintz Signed-off-by: David S.
Miller --- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 39c831d6ead3..b89c9a8e1655 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -899,11 +899,11 @@ int qed_mcp_drain(struct qed_hwfn *p_hwfn, int rc; rc = qed_mcp_cmd(p_hwfn, p_ptt, - DRV_MSG_CODE_NIG_DRAIN, 100, + DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, ¶m); /* Wait for the drain to complete before returning */ - msleep(120); + msleep(1020); return rc; } -- cgit v1.2.3 From 3a8befcd7872f572882ad7e14994a17f9b55dd4e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Sat, 12 Mar 2016 12:03:27 +0100 Subject: rocker: move ageing_time from struct rocker to struct ofdpa This is OF-DPA specific, used only there, similar to ofdpa_port->ageing_time. So move it to OF-DPA code. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/rocker/rocker.h | 1 - drivers/net/ethernet/rocker/rocker_main.c | 2 -- drivers/net/ethernet/rocker/rocker_ofdpa.c | 11 +++++++---- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 011f1b6f91bb..1ab995f7146b 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -63,7 +63,6 @@ struct rocker { struct { u64 id; } hw; - unsigned long ageing_time; spinlock_t cmd_ring_lock; /* for cmd ring accesses */ struct rocker_dma_ring_info cmd_ring; struct rocker_dma_ring_info event_ring; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index c8b1bf474e89..28b775e5a9ad 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -2733,8 +2733,6 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) rocker->hw.id = rocker_read64(rocker, SWITCH_ID); - rocker->ageing_time = BR_DEFAULT_AGEING_TIME; - err = rocker_probe_ports(rocker); if (err) { dev_err(&pdev->dev, "failed to probe ports\n"); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index cfde525d9123..0e758bcb26b0 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -188,6 +188,7 @@ struct ofdpa { DECLARE_HASHTABLE(neigh_tbl, 16); spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */ u32 neigh_tbl_next_index; + unsigned long ageing_time; }; struct ofdpa_port { @@ -2105,7 +2106,7 @@ static void ofdpa_fdb_cleanup(unsigned long data) struct ofdpa_port *ofdpa_port; struct ofdpa_fdb_tbl_entry *entry; struct hlist_node *tmp; - unsigned long next_timer = jiffies + ofdpa->rocker->ageing_time; + unsigned long next_timer = jiffies + ofdpa->ageing_time; unsigned long expires; unsigned long lock_flags; int flags = OFDPA_OP_FLAG_NOWAIT | OFDPA_OP_FLAG_REMOVE | @@ -2492,6 +2493,8 @@ static int ofdpa_init(struct rocker *rocker) (unsigned long) ofdpa); mod_timer(&ofdpa->fdb_cleanup_timer, jiffies); + ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME; + return 0; } @@ -2648,12 +2651,12 @@ ofdpa_port_attr_bridge_ageing_time_set(struct rocker_port *rocker_port, struct switchdev_trans *trans) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; - struct rocker *rocker = rocker_port->rocker; + struct ofdpa *ofdpa = ofdpa_port->ofdpa; if (!switchdev_trans_ph_prepare(trans)) 
{ ofdpa_port->ageing_time = clock_t_to_jiffies(ageing_time); - if (ofdpa_port->ageing_time < rocker->ageing_time) - rocker->ageing_time = ofdpa_port->ageing_time; + if (ofdpa_port->ageing_time < ofdpa->ageing_time) + ofdpa->ageing_time = ofdpa_port->ageing_time; mod_timer(&ofdpa_port->ofdpa->fdb_cleanup_timer, jiffies); } -- cgit v1.2.3 From c194cf93c164ed1c71142485ee0f70f9f2d1fe35 Mon Sep 17 00:00:00 2001 From: Alexander Duyck Date: Wed, 9 Mar 2016 09:24:23 -0800 Subject: gro: Defer clearing of flush bit in tunnel paths This patch updates the GRO handlers for GRE, VXLAN, GENEVE, and FOU so that we do not clear the flush bit until after we have called the next level GRO handler. Previously this was being cleared before parsing through the list of frames, however this resulted in several paths where either the bit needed to be reset but wasn't as in the case of FOU, or cases where it was being set as in GENEVE. By just deferring the clearing of the bit until after the next level protocol has been parsed we can avoid any unnecessary bit twiddling and avoid bugs. Signed-off-by: Alexander Duyck Signed-off-by: David S. Miller --- drivers/net/geneve.c | 7 ++----- drivers/net/vxlan.c | 3 +-- net/ipv4/fou.c | 3 +-- net/ipv4/gre_offload.c | 3 +-- 4 files changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 33185b9a435e..192631a345df 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -463,8 +463,6 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head, goto out; } - flush = 0; - for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -481,14 +479,13 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head, rcu_read_lock(); ptype = gro_find_receive_by_type(type); - if (!ptype) { - flush = 1; + if (!ptype) goto out_unlock; - } skb_gro_pull(skb, gh_len); skb_gro_postpull_rcsum(skb, gh, gh_len); pp = ptype->callbacks.gro_receive(head, skb); + flush = 0; out_unlock: rcu_read_unlock(); diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 8eda76f9e474..800106a7246c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -591,8 +591,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */ - flush = 0; - for (p = *head; p; p = p->next) { if (!NAPI_GRO_CB(p)->same_flow) continue; @@ -606,6 +604,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, } pp = eth_gro_receive(head, skb); + flush = 0; out: skb_gro_remcsum_cleanup(skb, &grc); diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 88dab0c1670c..780484243e14 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -319,8 +319,6 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, skb_gro_pull(skb, hdrlen); - flush = 0; - for (p = *head; p; p = p->next) { const struct guehdr *guehdr2; @@ -352,6 +350,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, goto out_unlock; pp = ops->callbacks.gro_receive(head, skb); + flush = 0; out_unlock: rcu_read_unlock(); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 47f4c544c916..540866dbd27d 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -175,8 +175,6 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, null_compute_pseudo); } - flush = 0; - for (p = *head; p; p = p->next) { const struct gre_base_hdr *greh2; @@ -213,6 +211,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, 
skb_gro_postpull_rcsum(skb, greh, grehlen); pp = ptype->callbacks.gro_receive(head, skb); + flush = 0; out_unlock: rcu_read_unlock(); -- cgit v1.2.3 From 562abd39a1902745bdcab266c7824cd6c5bc34d3 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 10 Mar 2016 12:30:27 +0000 Subject: xen-netback: support multiple extra info fragments passed from frontend The code does not currently support a frontend passing multiple extra info fragments to the backend in a tx request. The xenvif_get_extras() function handles multiple extra_info fragments but make_tx_response() assumes there is only ever a single extra info fragment. This patch modifies xenvif_get_extras() to pass back a count of extra info fragments, which is then passed to make_tx_response() (after possibly being stashed in pending_tx_info for deferred responses). Signed-off-by: Paul Durrant Cc: Wei Liu Acked-by: Wei Liu Signed-off-by: David S. Miller --- drivers/net/xen-netback/common.h | 1 + drivers/net/xen-netback/netback.c | 65 +++++++++++++++++++++++++-------------- 2 files changed, 43 insertions(+), 23 deletions(-) (limited to 'drivers') diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 112825200d41..f44b38846420 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -52,6 +52,7 @@ typedef unsigned int pending_ring_idx_t; struct pending_tx_info { struct xen_netif_tx_request req; /* tx request */ + unsigned int extra_count; /* Callback data for released SKBs. The callback is always * xenvif_zerocopy_callback, desc contains the pending_idx, which is * also an index in pending_tx_info array. It is initialized in diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 61b97c34bb3b..b42f26029225 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -95,6 +95,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, + unsigned int extra_count, s8 st); static void push_tx_responses(struct xenvif_queue *queue); @@ -696,14 +697,15 @@ void xenvif_tx_credit_callback(unsigned long data) } static void xenvif_tx_err(struct xenvif_queue *queue, - struct xen_netif_tx_request *txp, RING_IDX end) + struct xen_netif_tx_request *txp, + unsigned int extra_count, RING_IDX end) { RING_IDX cons = queue->tx.req_cons; unsigned long flags; do { spin_lock_irqsave(&queue->response_lock, flags); - make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); + make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR); push_tx_responses(queue); spin_unlock_irqrestore(&queue->response_lock, flags); if (cons == end) @@ -724,6 +726,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif) static int xenvif_count_requests(struct xenvif_queue *queue, struct xen_netif_tx_request *first, + unsigned int extra_count, struct xen_netif_tx_request *txp, int work_to_do) { @@ -812,7 +815,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue, } while (more_data); if (drop_err) { - xenvif_tx_err(queue, first, cons + slots); + xenvif_tx_err(queue, first, extra_count, cons + slots); return drop_err; } @@ -827,9 +830,10 @@ struct xenvif_tx_cb { #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, - u16 pending_idx, - struct xen_netif_tx_request *txp, - struct gnttab_map_grant_ref *mop) + u16 pending_idx, + struct xen_netif_tx_request *txp, + unsigned 
int extra_count, + struct gnttab_map_grant_ref *mop) { queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), @@ -838,6 +842,7 @@ static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, memcpy(&queue->pending_tx_info[pending_idx].req, txp, sizeof(*txp)); + queue->pending_tx_info[pending_idx].extra_count = extra_count; } static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) @@ -880,7 +885,7 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; - xenvif_tx_create_map_op(queue, pending_idx, txp, gop); + xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); } @@ -893,7 +898,8 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; - xenvif_tx_create_map_op(queue, pending_idx, txp, gop); + xenvif_tx_create_map_op(queue, pending_idx, txp, 0, + gop); frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx); } @@ -1095,8 +1101,9 @@ static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) } static int xenvif_get_extras(struct xenvif_queue *queue, - struct xen_netif_extra_info *extras, - int work_to_do) + struct xen_netif_extra_info *extras, + unsigned int *extra_count, + int work_to_do) { struct xen_netif_extra_info extra; RING_IDX cons = queue->tx.req_cons; @@ -1109,9 +1116,12 @@ static int xenvif_get_extras(struct xenvif_queue *queue, } RING_COPY_REQUEST(&queue->tx, cons, &extra); + + queue->tx.req_cons = ++cons; + (*extra_count)++; + if (unlikely(!extra.type || extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) { - queue->tx.req_cons = ++cons; netdev_err(queue->vif->dev, "Invalid extra type: %d\n", extra.type); xenvif_fatal_tx_err(queue->vif); @@ -1119,7 +1129,6 @@ static int xenvif_get_extras(struct xenvif_queue *queue, } memcpy(&extras[extra.type - 1], &extra, sizeof(extra)); - queue->tx.req_cons = ++cons; } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE); return work_to_do; @@ -1294,6 +1303,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, struct xen_netif_tx_request txreq; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1]; + unsigned int extra_count; u16 pending_idx; RING_IDX idx; int work_to_do; @@ -1330,8 +1340,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, queue->tx.req_cons = ++idx; memset(extras, 0, sizeof(extras)); + extra_count = 0; if (txreq.flags & XEN_NETTXF_extra_info) { work_to_do = xenvif_get_extras(queue, extras, + &extra_count, work_to_do); idx = queue->tx.req_cons; if (unlikely(work_to_do < 0)) @@ -1344,7 +1356,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1]; ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr); - make_tx_response(queue, &txreq, + make_tx_response(queue, &txreq, extra_count, (ret == 0) ? 
XEN_NETIF_RSP_OKAY : XEN_NETIF_RSP_ERROR); @@ -1358,12 +1370,14 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1]; xenvif_mcast_del(queue->vif, extra->u.mcast.addr); - make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY); + make_tx_response(queue, &txreq, extra_count, + XEN_NETIF_RSP_OKAY); push_tx_responses(queue); continue; } - ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); + ret = xenvif_count_requests(queue, &txreq, extra_count, + txfrags, work_to_do); if (unlikely(ret < 0)) break; @@ -1372,7 +1386,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (unlikely(txreq.size < ETH_HLEN)) { netdev_dbg(queue->vif->dev, "Bad packet size: %d\n", txreq.size); - xenvif_tx_err(queue, &txreq, idx); + xenvif_tx_err(queue, &txreq, extra_count, idx); break; } @@ -1397,7 +1411,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (unlikely(skb == NULL)) { netdev_dbg(queue->vif->dev, "Can't allocate a skb in start_xmit.\n"); - xenvif_tx_err(queue, &txreq, idx); + xenvif_tx_err(queue, &txreq, extra_count, idx); break; } @@ -1416,7 +1430,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, nskb = xenvif_alloc_skb(0); if (unlikely(nskb == NULL)) { kfree_skb(skb); - xenvif_tx_err(queue, &txreq, idx); + xenvif_tx_err(queue, &txreq, extra_count, idx); if (net_ratelimit()) netdev_err(queue->vif->dev, "Can't allocate the frag_list skb.\n"); @@ -1457,13 +1471,16 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, if (data_len < txreq.size) { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], pending_idx); - xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); + xenvif_tx_create_map_op(queue, pending_idx, &txreq, + extra_count, gop); gop++; } else { frag_set_pending_idx(&skb_shinfo(skb)->frags[0], INVALID_PENDING_IDX); - memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, - sizeof(txreq)); + memcpy(&queue->pending_tx_info[pending_idx].req, + &txreq, sizeof(txreq)); + queue->pending_tx_info[pending_idx].extra_count = + extra_count; } queue->pending_cons++; @@ -1804,7 +1821,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, spin_lock_irqsave(&queue->response_lock, flags); - make_tx_response(queue, &pending_tx_info->req, status); + make_tx_response(queue, &pending_tx_info->req, + pending_tx_info->extra_count, status); /* Release the pending index before pusing the Tx response so * its available before a new Tx request is pushed by the @@ -1821,6 +1839,7 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, static void make_tx_response(struct xenvif_queue *queue, struct xen_netif_tx_request *txp, + unsigned int extra_count, s8 st) { RING_IDX i = queue->tx.rsp_prod_pvt; @@ -1830,7 +1849,7 @@ static void make_tx_response(struct xenvif_queue *queue, resp->id = txp->id; resp->status = st; - if (txp->flags & XEN_NETTXF_extra_info) + while (extra_count-- != 0) RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; queue->tx.rsp_prod_pvt = ++i; -- cgit v1.2.3 From 8e4ee59c1e75b74966476dcc3552c3b30d2768e7 Mon Sep 17 00:00:00 2001 From: Paul Durrant Date: Thu, 10 Mar 2016 12:30:28 +0000 Subject: xen-netback: reduce log spam Remove the "prepare for reconnect" pr_info in xenbus.c. It's largely uninteresting and the states of the frontend and backend can easily be observed by watching the (o)xenstored log. Signed-off-by: Paul Durrant Cc: Wei Liu Acked-by: Wei Liu Signed-off-by: David S. 
Miller --- drivers/net/xen-netback/xenbus.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 39a303de20dd..bd182cd55dda 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -511,8 +511,6 @@ static void set_backend_state(struct backend_info *be, switch (state) { case XenbusStateInitWait: case XenbusStateConnected: - pr_info("%s: prepare for reconnect\n", - be->dev->nodename); backend_switch_state(be, XenbusStateInitWait); break; case XenbusStateClosing: -- cgit v1.2.3 From 6bdaa5e9ed39b3b3328f35d218e8ad5a99cfc4d2 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Thu, 10 Mar 2016 16:44:32 +0100 Subject: net: macb: fix default configuration for GMAC on AT91 On AT91 SoCs, the User Register (USRIO) exposes a switch to configure the "Reduced" or "Traditional" version of the Media Independent Interface (RMII vs. MII or RGMII vs. GMII). As on the older EMAC version, on GMAC, this switch is set by default to the non-reduced type of interface, so use the existing capability and extend it to GMII as well. We then keep the current logic in the macb_init() function. The capabilities of sama5d2, sama5d4 and sama5d3 GEM interface are updated in the macb_config structure to be able to properly enable them with a traditional interface (GMII or MII). Reported-by: Romain HENRIET Signed-off-by: Nicolas Ferre Signed-off-by: David S. Miller --- drivers/net/ethernet/cadence/macb.c | 13 +++++++------ drivers/net/ethernet/cadence/macb.h | 2 +- 2 files changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 7ccf2298a5fa..3ce6095ced3d 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2440,9 +2440,9 @@ static int macb_init(struct platform_device *pdev) if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII) val = GEM_BIT(RGMII); else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII && - (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) val = MACB_BIT(RMII); - else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII)) + else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII)) val = MACB_BIT(MII); if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN) @@ -2774,7 +2774,7 @@ static int at91ether_init(struct platform_device *pdev) } static const struct macb_config at91sam9260_config = { - .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII, + .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .clk_init = macb_clk_init, .init = macb_init, }; @@ -2787,21 +2787,22 @@ static const struct macb_config pc302gem_config = { }; static const struct macb_config sama5d2_config = { - .caps = 0, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, }; static const struct macb_config sama5d3_config = { - .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, + .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE + | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = macb_init, }; static const struct macb_config sama5d4_config = { - .caps = 0, + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII, .dma_burst_length = 4, .clk_init = macb_clk_init, .init = macb_init, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 
9ba416d5afff..8a13824ef802 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -400,7 +400,7 @@ /* Capability mask bits */ #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 -#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004 +#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 #define MACB_CAPS_USRIO_DISABLED 0x00000010 #define MACB_CAPS_FIFO_MODE 0x10000000 -- cgit v1.2.3 From 233fa44bd67ae0c6cbc01f96ab87d66a6c57812e Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 10 Mar 2016 23:10:21 +0100 Subject: mlxsw: pci: Implement reset done check Firmware now tells us that the reset is done by passing a magic value via register. Use it to shorten the wait in case this is supported. With old firmware, we still wait until the timeout is reached. Signed-off-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlxsw/pci.c | 15 +++++++++++---- drivers/net/ethernet/mellanox/mlxsw/pci.h | 3 +++ 2 files changed, 14 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 7992c553c1f5..7f4173c8eda3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1681,11 +1681,18 @@ static const struct mlxsw_bus mlxsw_pci_bus = { static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci) { + unsigned long end; + mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT); - /* Current firware does not let us know when the reset is done. - * So we just wait here for constant time and hope for the best. - */ - msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + wmb(); /* reset needs to be written before we read control register */ + end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS); + do { + u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY); + + if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC) + break; + cond_resched(); + } while (time_before(jiffies, end)); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h index 912106054ff2..d942a3e6fa41 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h @@ -61,6 +61,9 @@ #define MLXSW_PCI_SW_RESET 0xF0010 #define MLXSW_PCI_SW_RESET_RST_BIT BIT(0) #define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000 +#define MLXSW_PCI_FW_READY 0xA1844 +#define MLXSW_PCI_FW_READY_MASK 0xFF +#define MLXSW_PCI_FW_READY_MAGIC 0x5E #define MLXSW_PCI_DOORBELL_SDQ_OFFSET 0x000 #define MLXSW_PCI_DOORBELL_RDQ_OFFSET 0x200 -- cgit v1.2.3 From c45569755e828a8458d125bb93feb1d90ac9be6f Mon Sep 17 00:00:00 2001 From: Chun-Hao Lin Date: Fri, 11 Mar 2016 14:21:14 +0800 Subject: r8169: Remove unnecessary phy reset for pcie nic when setting link speed. For a pcie nic, after setting the link speed, if there is no link the driver does not need to do a phy reset until link up. For some pcie nics, doing so would also reset the phy speed down counter and prevent the phy from auto speed down. This patch fixes the issue reported in the following link. https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1547151 Signed-off-by: Chunhao Lin Signed-off-by: David S.
Miller --- drivers/net/ethernet/realtek/r8169.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index dd2cf3738b73..94f08f1e841c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -1999,7 +1999,8 @@ static int rtl8169_set_speed(struct net_device *dev, goto out; if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && - (advertising & ADVERTISED_1000baseT_Full)) { + (advertising & ADVERTISED_1000baseT_Full) && + !pci_is_pcie(tp->pci_dev)) { mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); } out: -- cgit v1.2.3 From b4a53379a0c19df784e30b1f35af5841f2b74f30 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Fri, 11 Mar 2016 09:44:08 +0100 Subject: mlx4: use napi_consume_skb API to get bulk free operations Bulk free of SKBs happen transparently by the API call napi_consume_skb(). The napi budget parameter is usually needed by napi_consume_skb() to detect if called from netpoll. In this patch it has an extra meaning. For mlx4 driver, the mlx4_en_stop_port() call is done outside NAPI/softirq context, and cleanup the entire TX ring via mlx4_en_free_tx_buf(). The code mlx4_en_free_tx_desc() for freeing SKBs are shared with NAPI calls. To handle this shared use the zero budget indication is reused, and handled appropriately in napi_consume_skb(). To reflect this, variable is called napi_mode for the function call that needed this distinction. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index e0946ab22010..c0d7b7296236 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -276,7 +276,8 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, - int index, u8 owner, u64 timestamp) + int index, u8 owner, u64 timestamp, + int napi_mode) { struct mlx4_en_tx_info *tx_info = &ring->tx_info[index]; struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE; @@ -347,7 +348,8 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, } } } - dev_consume_skb_any(skb); + napi_consume_skb(skb, napi_mode); + return tx_info->nr_txbb; } @@ -371,7 +373,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) while (ring->cons != ring->prod) { ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring, ring->cons & ring->size_mask, - !!(ring->cons & ring->size), 0); + !!(ring->cons & ring->size), 0, + 0 /* Non-NAPI caller */); ring->cons += ring->last_nr_txbb; cnt++; } @@ -385,7 +388,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) } static bool mlx4_en_process_tx_cq(struct net_device *dev, - struct mlx4_en_cq *cq) + struct mlx4_en_cq *cq, int napi_budget) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_cq *mcq = &cq->mcq; @@ -451,7 +454,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, last_nr_txbb = mlx4_en_free_tx_desc( priv, ring, ring_index, !!((ring_cons + txbbs_skipped) & - ring->size), timestamp); + ring->size), timestamp, napi_budget); mlx4_en_stamp_wqe(priv, ring, stamp_index, !!((ring_cons + txbbs_stamp) & @@ -511,7 +514,7 @@ int 
mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget) struct mlx4_en_priv *priv = netdev_priv(dev); int clean_complete; - clean_complete = mlx4_en_process_tx_cq(dev, cq); + clean_complete = mlx4_en_process_tx_cq(dev, cq, budget); if (!clean_complete) return budget; -- cgit v1.2.3 From 8ec736e556e3340b4b4295c7567b0766d6629702 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Fri, 11 Mar 2016 09:44:17 +0100 Subject: mlx5: use napi_consume_skb API to get bulk free operations Bulk free of SKBs happen transparently by the API call napi_consume_skb(). The napi budget parameter is needed by napi_consume_skb() to detect if called from netpoll. Signed-off-by: Jesper Dangaard Brouer Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0f76d321030f..0cb4a093958b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -629,7 +629,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev); void mlx5e_completion_event(struct mlx5_core_cq *mcq); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); int mlx5e_napi_poll(struct napi_struct *napi, int budget); -bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq); +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq); struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 94a14f85f70d..1ffc7cb6f78c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -339,7 +339,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev) return mlx5e_sq_xmit(sq, skb); } -bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) +bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) { struct mlx5e_sq *sq; u32 dma_fifo_cc; @@ -411,7 +411,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq) npkts++; nbytes += wi->num_bytes; sqcc += wi->num_wqebbs; - dev_kfree_skb(skb); + napi_consume_skb(skb, napi_budget); } while (!last_wqe); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 66d51a77609e..9bb4395aceeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -60,7 +60,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags); for (i = 0; i < c->num_tc; i++) - busy |= mlx5e_poll_tx_cq(&c->sq[i].cq); + busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); busy |= work_done == budget; -- cgit v1.2.3 From 98267311fe3b334ae7c107fa0e2413adcf3ba735 Mon Sep 17 00:00:00 2001 From: Zefir Kurtisi Date: Fri, 11 Mar 2016 15:31:53 +0100 Subject: at803x: fix suspend/resume for SGMII link When operating the at803x in SGMII mode, resuming the chip from power down brings up the copper-side link but leaves the SGMII link in unconnected state (tested with at8031 attached to gianfar). In effect, this caused a permanent link loss once the related interface was put down. 
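As an illustrative aside (a sketch only, not part of the applied diff), the access pattern the fix relies on can be summarized as follows, assuming the fibre/SGMII register page of the at803x is reached by clearing the AT803X_BT_BX_REG_SEL bit in the chip configuration register (0x1f), as the patch below defines:

	/* Sketch: power down the SGMII side of an at803x PHY.
	 * Assumes AT803X_REG_CHIP_CONFIG (0x1f) with AT803X_BT_BX_REG_SEL
	 * toggles between the copper and SGMII/fibre register pages,
	 * matching the definitions added by this patch.
	 */
	static void at803x_sgmii_power_down(struct phy_device *phydev)
	{
		int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);

		/* switch to the SGMII/fibre page */
		phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL);
		/* set the power-down bit on the SGMII side */
		phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN);
		/* switch back to the copper page */
		phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL);
	}

Resume mirrors this sequence, clearing BMCR_PDOWN (and BMCR_ISOLATE) instead of setting it.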
This patch ensures that power down handling in suspend() and resume() is also applied to the SGMII link. Signed-off-by: Zefir Kurtisi Signed-off-by: David S. Miller --- drivers/net/phy/at803x.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 2174ec937b4d..1e901c7cfaac 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -52,6 +52,9 @@ #define AT803X_DEBUG_REG_5 0x05 #define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8) +#define AT803X_REG_CHIP_CONFIG 0x1f +#define AT803X_BT_BX_REG_SEL 0x8000 + #define ATH8030_PHY_ID 0x004dd076 #define ATH8031_PHY_ID 0x004dd074 #define ATH8035_PHY_ID 0x004dd072 @@ -206,6 +209,7 @@ static int at803x_suspend(struct phy_device *phydev) { int value; int wol_enabled; + int ccr; mutex_lock(&phydev->lock); @@ -221,6 +225,16 @@ static int at803x_suspend(struct phy_device *phydev) phy_write(phydev, MII_BMCR, value); + if (phydev->interface != PHY_INTERFACE_MODE_SGMII) + goto done; + + /* also power-down SGMII interface */ + ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); + phy_write(phydev, MII_BMCR, phy_read(phydev, MII_BMCR) | BMCR_PDOWN); + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); + +done: mutex_unlock(&phydev->lock); return 0; @@ -229,6 +243,7 @@ static int at803x_suspend(struct phy_device *phydev) static int at803x_resume(struct phy_device *phydev) { int value; + int ccr; mutex_lock(&phydev->lock); @@ -236,6 +251,17 @@ static int at803x_resume(struct phy_device *phydev) value &= ~(BMCR_PDOWN | BMCR_ISOLATE); phy_write(phydev, MII_BMCR, value); + if (phydev->interface != PHY_INTERFACE_MODE_SGMII) + goto done; + + /* also power-up SGMII interface */ + ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG); + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr & ~AT803X_BT_BX_REG_SEL); + value = phy_read(phydev, MII_BMCR) & ~(BMCR_PDOWN | BMCR_ISOLATE); + phy_write(phydev, MII_BMCR, value); + phy_write(phydev, AT803X_REG_CHIP_CONFIG, ccr | AT803X_BT_BX_REG_SEL); + +done: mutex_unlock(&phydev->lock); return 0; -- cgit v1.2.3 From c09440f7dcb304002dfced8c0fea289eb25f2da0 Mon Sep 17 00:00:00 2001 From: Sabrina Dubroca Date: Fri, 11 Mar 2016 18:07:33 +0100 Subject: macsec: introduce IEEE 802.1AE driver This is an implementation of MACsec/IEEE 802.1AE. This driver provides authentication and encryption of traffic in a LAN, typically with GCM-AES-128, and optional replay protection. http://standards.ieee.org/getieee802/download/802.1AE-2006.pdf Signed-off-by: Sabrina Dubroca Reviewed-by: Hannes Frederic Sowa Signed-off-by: David S. Miller --- drivers/net/Kconfig | 7 + drivers/net/Makefile | 1 + drivers/net/macsec.c | 3297 ++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 3305 insertions(+) create mode 100644 drivers/net/macsec.c (limited to 'drivers') diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f184fb5bd110..2a1ba62b7da2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -193,6 +193,13 @@ config GENEVE To compile this driver as a module, choose M here: the module will be called geneve. +config MACSEC + tristate "IEEE 802.1AE MAC-level encryption (MACsec)" + select CRYPTO_AES + select CRYPTO_GCM + ---help--- + MACsec is an encryption standard for Ethernet.
+ config NETCONSOLE tristate "Network console logging support" ---help--- diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 900b0c5320bb..1aa7cb845663 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_IPVLAN) += ipvlan/ obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_EQUALIZER) += eql.o obj-$(CONFIG_IFB) += ifb.o +obj-$(CONFIG_MACSEC) += macsec.o obj-$(CONFIG_MACVLAN) += macvlan.o obj-$(CONFIG_MACVTAP) += macvtap.o obj-$(CONFIG_MII) += mii.o diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c new file mode 100644 index 000000000000..84d3e5ca8817 --- /dev/null +++ b/drivers/net/macsec.c @@ -0,0 +1,3297 @@ +/* + * drivers/net/macsec.c - MACsec device + * + * Copyright (c) 2015 Sabrina Dubroca + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +typedef u64 __bitwise sci_t; + +#define MACSEC_SCI_LEN 8 + +/* SecTAG length = macsec_eth_header without the optional SCI */ +#define MACSEC_TAG_LEN 6 + +struct macsec_eth_header { + struct ethhdr eth; + /* SecTAG */ + u8 tci_an; +#if defined(__LITTLE_ENDIAN_BITFIELD) + u8 short_length:6, + unused:2; +#elif defined(__BIG_ENDIAN_BITFIELD) + u8 unused:2, + short_length:6; +#else +#error "Please fix " +#endif + __be32 packet_number; + u8 secure_channel_id[8]; /* optional */ +} __packed; + +#define MACSEC_TCI_VERSION 0x80 +#define MACSEC_TCI_ES 0x40 /* end station */ +#define MACSEC_TCI_SC 0x20 /* SCI present */ +#define MACSEC_TCI_SCB 0x10 /* epon */ +#define MACSEC_TCI_E 0x08 /* encryption */ +#define MACSEC_TCI_C 0x04 /* changed text */ +#define MACSEC_AN_MASK 0x03 /* association number */ +#define MACSEC_TCI_CONFID (MACSEC_TCI_E | MACSEC_TCI_C) + +/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */ +#define MIN_NON_SHORT_LEN 48 + +#define GCM_AES_IV_LEN 12 +#define DEFAULT_ICV_LEN 16 + +#define MACSEC_NUM_AN 4 /* 2 bits for the association number */ + +#define for_each_rxsc(secy, sc) \ + for (sc = rcu_dereference_bh(secy->rx_sc); \ + sc; \ + sc = rcu_dereference_bh(sc->next)) +#define for_each_rxsc_rtnl(secy, sc) \ + for (sc = rtnl_dereference(secy->rx_sc); \ + sc; \ + sc = rtnl_dereference(sc->next)) + +struct gcm_iv { + union { + u8 secure_channel_id[8]; + sci_t sci; + }; + __be32 pn; +}; + +/** + * struct macsec_key - SA key + * @id: user-provided key identifier + * @tfm: crypto struct, key storage + */ +struct macsec_key { + u64 id; + struct crypto_aead *tfm; +}; + +struct macsec_rx_sc_stats { + __u64 InOctetsValidated; + __u64 InOctetsDecrypted; + __u64 InPktsUnchecked; + __u64 InPktsDelayed; + __u64 InPktsOK; + __u64 InPktsInvalid; + __u64 InPktsLate; + __u64 InPktsNotValid; + __u64 InPktsNotUsingSA; + __u64 InPktsUnusedSA; +}; + +struct macsec_rx_sa_stats { + __u32 InPktsOK; + __u32 InPktsInvalid; + __u32 InPktsNotValid; + __u32 InPktsNotUsingSA; + __u32 InPktsUnusedSA; +}; + +struct macsec_tx_sa_stats { + __u32 OutPktsProtected; + __u32 OutPktsEncrypted; +}; + +struct macsec_tx_sc_stats { + __u64 OutPktsProtected; + __u64 OutPktsEncrypted; + __u64 OutOctetsProtected; + __u64 OutOctetsEncrypted; +}; + +struct macsec_dev_stats { + __u64 OutPktsUntagged; + __u64 InPktsUntagged; + __u64 OutPktsTooLong; + __u64 InPktsNoTag; + __u64 InPktsBadTag; + __u64 
InPktsUnknownSCI; + __u64 InPktsNoSCI; + __u64 InPktsOverrun; +}; + +/** + * struct macsec_rx_sa - receive secure association + * @active: + * @next_pn: packet number expected for the next packet + * @lock: protects next_pn manipulations + * @key: key structure + * @stats: per-SA stats + */ +struct macsec_rx_sa { + struct macsec_key key; + spinlock_t lock; + u32 next_pn; + atomic_t refcnt; + bool active; + struct macsec_rx_sa_stats __percpu *stats; + struct macsec_rx_sc *sc; + struct rcu_head rcu; +}; + +struct pcpu_rx_sc_stats { + struct macsec_rx_sc_stats stats; + struct u64_stats_sync syncp; +}; + +/** + * struct macsec_rx_sc - receive secure channel + * @sci: secure channel identifier for this SC + * @active: channel is active + * @sa: array of secure associations + * @stats: per-SC stats + */ +struct macsec_rx_sc { + struct macsec_rx_sc __rcu *next; + sci_t sci; + bool active; + struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN]; + struct pcpu_rx_sc_stats __percpu *stats; + atomic_t refcnt; + struct rcu_head rcu_head; +}; + +/** + * struct macsec_tx_sa - transmit secure association + * @active: + * @next_pn: packet number to use for the next packet + * @lock: protects next_pn manipulations + * @key: key structure + * @stats: per-SA stats + */ +struct macsec_tx_sa { + struct macsec_key key; + spinlock_t lock; + u32 next_pn; + atomic_t refcnt; + bool active; + struct macsec_tx_sa_stats __percpu *stats; + struct rcu_head rcu; +}; + +struct pcpu_tx_sc_stats { + struct macsec_tx_sc_stats stats; + struct u64_stats_sync syncp; +}; + +/** + * struct macsec_tx_sc - transmit secure channel + * @active: + * @encoding_sa: association number of the SA currently in use + * @encrypt: encrypt packets on transmit, or authenticate only + * @send_sci: always include the SCI in the SecTAG + * @end_station: + * @scb: single copy broadcast flag + * @sa: array of secure associations + * @stats: stats for this TXSC + */ +struct macsec_tx_sc { + bool active; + u8 encoding_sa; + bool encrypt; + bool send_sci; + bool end_station; + bool scb; + struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN]; + struct pcpu_tx_sc_stats __percpu *stats; +}; + +#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT + +/** + * struct macsec_secy - MACsec Security Entity + * @netdev: netdevice for this SecY + * @n_rx_sc: number of receive secure channels configured on this SecY + * @sci: secure channel identifier used for tx + * @key_len: length of keys used by the cipher suite + * @icv_len: length of ICV used by the cipher suite + * @validate_frames: validation mode + * @operational: MAC_Operational flag + * @protect_frames: enable protection for this SecY + * @replay_protect: enable packet number checks on receive + * @replay_window: size of the replay window + * @tx_sc: transmit secure channel + * @rx_sc: linked list of receive secure channels + */ +struct macsec_secy { + struct net_device *netdev; + unsigned int n_rx_sc; + sci_t sci; + u16 key_len; + u16 icv_len; + enum macsec_validation_type validate_frames; + bool operational; + bool protect_frames; + bool replay_protect; + u32 replay_window; + struct macsec_tx_sc tx_sc; + struct macsec_rx_sc __rcu *rx_sc; +}; + +struct pcpu_secy_stats { + struct macsec_dev_stats stats; + struct u64_stats_sync syncp; +}; + +/** + * struct macsec_dev - private data + * @secy: SecY config + * @real_dev: pointer to underlying netdevice + * @stats: MACsec device stats + * @secys: linked list of SecY's on the underlying device + */ +struct macsec_dev { + struct macsec_secy secy; + struct net_device 
*real_dev; + struct pcpu_secy_stats __percpu *stats; + struct list_head secys; +}; + +/** + * struct macsec_rxh_data - rx_handler private argument + * @secys: linked list of SecY's on this underlying device + */ +struct macsec_rxh_data { + struct list_head secys; +}; + +static struct macsec_dev *macsec_priv(const struct net_device *dev) +{ + return (struct macsec_dev *)netdev_priv(dev); +} + +static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev) +{ + return rcu_dereference_bh(dev->rx_handler_data); +} + +static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev) +{ + return rtnl_dereference(dev->rx_handler_data); +} + +struct macsec_cb { + struct aead_request *req; + union { + struct macsec_tx_sa *tx_sa; + struct macsec_rx_sa *rx_sa; + }; + u8 assoc_num; + bool valid; + bool has_sci; +}; + +static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) +{ + struct macsec_rx_sa *sa = rcu_dereference_bh(ptr); + + if (!sa || !sa->active) + return NULL; + + if (!atomic_inc_not_zero(&sa->refcnt)) + return NULL; + + return sa; +} + +static void free_rx_sc_rcu(struct rcu_head *head) +{ + struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); + + free_percpu(rx_sc->stats); + kfree(rx_sc); +} + +static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc) +{ + return atomic_inc_not_zero(&sc->refcnt) ? sc : NULL; +} + +static void macsec_rxsc_put(struct macsec_rx_sc *sc) +{ + if (atomic_dec_and_test(&sc->refcnt)) + call_rcu(&sc->rcu_head, free_rx_sc_rcu); +} + +static void free_rxsa(struct rcu_head *head) +{ + struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu); + + crypto_free_aead(sa->key.tfm); + free_percpu(sa->stats); + macsec_rxsc_put(sa->sc); + kfree(sa); +} + +static void macsec_rxsa_put(struct macsec_rx_sa *sa) +{ + if (atomic_dec_and_test(&sa->refcnt)) + call_rcu(&sa->rcu, free_rxsa); +} + +static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr) +{ + struct macsec_tx_sa *sa = rcu_dereference_bh(ptr); + + if (!sa || !sa->active) + return NULL; + + if (!atomic_inc_not_zero(&sa->refcnt)) + return NULL; + + return sa; +} + +static void free_txsa(struct rcu_head *head) +{ + struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu); + + crypto_free_aead(sa->key.tfm); + free_percpu(sa->stats); + kfree(sa); +} + +static void macsec_txsa_put(struct macsec_tx_sa *sa) +{ + if (atomic_dec_and_test(&sa->refcnt)) + call_rcu(&sa->rcu, free_txsa); +} + +static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb)); + return (struct macsec_cb *)skb->cb; +} + +#define MACSEC_PORT_ES (htons(0x0001)) +#define MACSEC_PORT_SCB (0x0000) +#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL) + +#define DEFAULT_SAK_LEN 16 +#define DEFAULT_SEND_SCI true +#define DEFAULT_ENCRYPT false +#define DEFAULT_ENCODING_SA 0 + +static sci_t make_sci(u8 *addr, __be16 port) +{ + sci_t sci; + + memcpy(&sci, addr, ETH_ALEN); + memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port)); + + return sci; +} + +static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present) +{ + sci_t sci; + + if (sci_present) + memcpy(&sci, hdr->secure_channel_id, + sizeof(hdr->secure_channel_id)); + else + sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES); + + return sci; +} + +static unsigned int macsec_sectag_len(bool sci_present) +{ + return MACSEC_TAG_LEN + (sci_present ? 
MACSEC_SCI_LEN : 0); +} + +static unsigned int macsec_hdr_len(bool sci_present) +{ + return macsec_sectag_len(sci_present) + ETH_HLEN; +} + +static unsigned int macsec_extra_len(bool sci_present) +{ + return macsec_sectag_len(sci_present) + sizeof(__be16); +} + +/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */ +static void macsec_fill_sectag(struct macsec_eth_header *h, + const struct macsec_secy *secy, u32 pn) +{ + const struct macsec_tx_sc *tx_sc = &secy->tx_sc; + + memset(&h->tci_an, 0, macsec_sectag_len(tx_sc->send_sci)); + h->eth.h_proto = htons(ETH_P_MACSEC); + + if (tx_sc->send_sci || + (secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb)) { + h->tci_an |= MACSEC_TCI_SC; + memcpy(&h->secure_channel_id, &secy->sci, + sizeof(h->secure_channel_id)); + } else { + if (tx_sc->end_station) + h->tci_an |= MACSEC_TCI_ES; + if (tx_sc->scb) + h->tci_an |= MACSEC_TCI_SCB; + } + + h->packet_number = htonl(pn); + + /* with GCM, C/E clear for !encrypt, both set for encrypt */ + if (tx_sc->encrypt) + h->tci_an |= MACSEC_TCI_CONFID; + else if (secy->icv_len != DEFAULT_ICV_LEN) + h->tci_an |= MACSEC_TCI_C; + + h->tci_an |= tx_sc->encoding_sa; +} + +static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len) +{ + if (data_len < MIN_NON_SHORT_LEN) + h->short_length = data_len; +} + +/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */ +static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len) +{ + struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data; + int len = skb->len - 2 * ETH_ALEN; + int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len; + + /* a) It comprises at least 17 octets */ + if (skb->len <= 16) + return false; + + /* b) MACsec EtherType: already checked */ + + /* c) V bit is clear */ + if (h->tci_an & MACSEC_TCI_VERSION) + return false; + + /* d) ES or SCB => !SC */ + if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) && + (h->tci_an & MACSEC_TCI_SC)) + return false; + + /* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */ + if (h->unused) + return false; + + /* rx.pn != 0 (figure 10-5) */ + if (!h->packet_number) + return false; + + /* length check, f) g) h) i) */ + if (h->short_length) + return len == extra_len + h->short_length; + return len >= extra_len + MIN_NON_SHORT_LEN; +} + +#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true)) +#define MACSEC_NEEDED_TAILROOM MACSEC_MAX_ICV_LEN + +static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn) +{ + struct gcm_iv *gcm_iv = (struct gcm_iv *)iv; + + gcm_iv->sci = sci; + gcm_iv->pn = htonl(pn); +} + +static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb) +{ + return (struct macsec_eth_header *)skb_mac_header(skb); +} + +static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy) +{ + u32 pn; + + spin_lock_bh(&tx_sa->lock); + pn = tx_sa->next_pn; + + tx_sa->next_pn++; + if (tx_sa->next_pn == 0) { + pr_debug("PN wrapped, transitioning to !oper\n"); + tx_sa->active = false; + if (secy->protect_frames) + secy->operational = false; + } + spin_unlock_bh(&tx_sa->lock); + + return pn; +} + +static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) +{ + struct macsec_dev *macsec = netdev_priv(dev); + + skb->dev = macsec->real_dev; + skb_reset_mac_header(skb); + skb->protocol = eth_hdr(skb)->h_proto; +} + +static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, + struct macsec_tx_sa *tx_sa) +{ + struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); + + 
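+	/* Bump the per-CPU TXSC counters inside a u64_stats section: the
+	 * Encrypted octet/packet counters when confidentiality is enabled
+	 * on the TX SC, otherwise the Protected (integrity-only) counters.
+	 */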
u64_stats_update_begin(&txsc_stats->syncp); + if (tx_sc->encrypt) { + txsc_stats->stats.OutOctetsEncrypted += skb->len; + txsc_stats->stats.OutPktsEncrypted++; + this_cpu_inc(tx_sa->stats->OutPktsEncrypted); + } else { + txsc_stats->stats.OutOctetsProtected += skb->len; + txsc_stats->stats.OutPktsProtected++; + this_cpu_inc(tx_sa->stats->OutPktsProtected); + } + u64_stats_update_end(&txsc_stats->syncp); +} + +static void count_tx(struct net_device *dev, int ret, int len) +{ + if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { + struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); + + u64_stats_update_begin(&stats->syncp); + stats->tx_packets++; + stats->tx_bytes += len; + u64_stats_update_end(&stats->syncp); + } else { + dev->stats.tx_dropped++; + } +} + +static void macsec_encrypt_done(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + struct net_device *dev = skb->dev; + struct macsec_dev *macsec = macsec_priv(dev); + struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa; + int len, ret; + + aead_request_free(macsec_skb_cb(skb)->req); + + rcu_read_lock_bh(); + macsec_encrypt_finish(skb, dev); + macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); + len = skb->len; + ret = dev_queue_xmit(skb); + count_tx(dev, ret, len); + rcu_read_unlock_bh(); + + macsec_txsa_put(sa); + dev_put(dev); +} + +static struct sk_buff *macsec_encrypt(struct sk_buff *skb, + struct net_device *dev) +{ + int ret; + struct scatterlist sg[MAX_SKB_FRAGS + 1]; + unsigned char iv[GCM_AES_IV_LEN]; + struct ethhdr *eth; + struct macsec_eth_header *hh; + size_t unprotected_len; + struct aead_request *req; + struct macsec_secy *secy; + struct macsec_tx_sc *tx_sc; + struct macsec_tx_sa *tx_sa; + struct macsec_dev *macsec = macsec_priv(dev); + u32 pn; + + secy = &macsec->secy; + tx_sc = &secy->tx_sc; + + /* 10.5.1 TX SA assignment */ + tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]); + if (!tx_sa) { + secy->operational = false; + kfree_skb(skb); + return ERR_PTR(-EINVAL); + } + + if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM || + skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) { + struct sk_buff *nskb = skb_copy_expand(skb, + MACSEC_NEEDED_HEADROOM, + MACSEC_NEEDED_TAILROOM, + GFP_ATOMIC); + if (likely(nskb)) { + consume_skb(skb); + skb = nskb; + } else { + macsec_txsa_put(tx_sa); + kfree_skb(skb); + return ERR_PTR(-ENOMEM); + } + } else { + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) { + macsec_txsa_put(tx_sa); + return ERR_PTR(-ENOMEM); + } + } + + unprotected_len = skb->len; + eth = eth_hdr(skb); + hh = (struct macsec_eth_header *)skb_push(skb, macsec_extra_len(tx_sc->send_sci)); + memmove(hh, eth, 2 * ETH_ALEN); + + pn = tx_sa_update_pn(tx_sa, secy); + if (pn == 0) { + macsec_txsa_put(tx_sa); + kfree_skb(skb); + return ERR_PTR(-ENOLINK); + } + macsec_fill_sectag(hh, secy, pn); + macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN); + + macsec_fill_iv(iv, secy->sci, pn); + + skb_put(skb, secy->icv_len); + + if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) { + struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); + + u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.OutPktsTooLong++; + u64_stats_update_end(&secy_stats->syncp); + + macsec_txsa_put(tx_sa); + kfree_skb(skb); + return ERR_PTR(-EINVAL); + } + + req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC); + if (!req) { + macsec_txsa_put(tx_sa); + kfree_skb(skb); + return ERR_PTR(-ENOMEM); + } + + sg_init_table(sg, MAX_SKB_FRAGS + 1); + 
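+	/* Map the whole frame (MACsec header, payload and the reserved ICV
+	 * space) into the scatterlist used for the AEAD request.
+	 */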
skb_to_sgvec(skb, sg, 0, skb->len); + + if (tx_sc->encrypt) { + int len = skb->len - macsec_hdr_len(tx_sc->send_sci) - + secy->icv_len; + aead_request_set_crypt(req, sg, sg, len, iv); + aead_request_set_ad(req, macsec_hdr_len(tx_sc->send_sci)); + } else { + aead_request_set_crypt(req, sg, sg, 0, iv); + aead_request_set_ad(req, skb->len - secy->icv_len); + } + + macsec_skb_cb(skb)->req = req; + macsec_skb_cb(skb)->tx_sa = tx_sa; + aead_request_set_callback(req, 0, macsec_encrypt_done, skb); + + dev_hold(skb->dev); + ret = crypto_aead_encrypt(req); + if (ret == -EINPROGRESS) { + return ERR_PTR(ret); + } else if (ret != 0) { + dev_put(skb->dev); + kfree_skb(skb); + aead_request_free(req); + macsec_txsa_put(tx_sa); + return ERR_PTR(-EINVAL); + } + + dev_put(skb->dev); + aead_request_free(req); + macsec_txsa_put(tx_sa); + + return skb; +} + +static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn) +{ + struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; + struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats); + struct macsec_eth_header *hdr = macsec_ethhdr(skb); + u32 lowest_pn = 0; + + spin_lock(&rx_sa->lock); + if (rx_sa->next_pn >= secy->replay_window) + lowest_pn = rx_sa->next_pn - secy->replay_window; + + /* Now perform replay protection check again + * (see IEEE 802.1AE-2006 figure 10-5) + */ + if (secy->replay_protect && pn < lowest_pn) { + spin_unlock(&rx_sa->lock); + u64_stats_update_begin(&rxsc_stats->syncp); + rxsc_stats->stats.InPktsLate++; + u64_stats_update_end(&rxsc_stats->syncp); + return false; + } + + if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { + u64_stats_update_begin(&rxsc_stats->syncp); + if (hdr->tci_an & MACSEC_TCI_E) + rxsc_stats->stats.InOctetsDecrypted += skb->len; + else + rxsc_stats->stats.InOctetsValidated += skb->len; + u64_stats_update_end(&rxsc_stats->syncp); + } + + if (!macsec_skb_cb(skb)->valid) { + spin_unlock(&rx_sa->lock); + + /* 10.6.5 */ + if (hdr->tci_an & MACSEC_TCI_C || + secy->validate_frames == MACSEC_VALIDATE_STRICT) { + u64_stats_update_begin(&rxsc_stats->syncp); + rxsc_stats->stats.InPktsNotValid++; + u64_stats_update_end(&rxsc_stats->syncp); + return false; + } + + u64_stats_update_begin(&rxsc_stats->syncp); + if (secy->validate_frames == MACSEC_VALIDATE_CHECK) { + rxsc_stats->stats.InPktsInvalid++; + this_cpu_inc(rx_sa->stats->InPktsInvalid); + } else if (pn < lowest_pn) { + rxsc_stats->stats.InPktsDelayed++; + } else { + rxsc_stats->stats.InPktsUnchecked++; + } + u64_stats_update_end(&rxsc_stats->syncp); + } else { + u64_stats_update_begin(&rxsc_stats->syncp); + if (pn < lowest_pn) { + rxsc_stats->stats.InPktsDelayed++; + } else { + rxsc_stats->stats.InPktsOK++; + this_cpu_inc(rx_sa->stats->InPktsOK); + } + u64_stats_update_end(&rxsc_stats->syncp); + + if (pn >= rx_sa->next_pn) + rx_sa->next_pn = pn + 1; + spin_unlock(&rx_sa->lock); + } + + return true; +} + +static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev) +{ + skb->pkt_type = PACKET_HOST; + skb->protocol = eth_type_trans(skb, dev); + + skb_reset_network_header(skb); + if (!skb_transport_header_was_set(skb)) + skb_reset_transport_header(skb); + skb_reset_mac_len(skb); +} + +static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len) +{ + memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN); + skb_pull(skb, hdr_len); + pskb_trim_unique(skb, skb->len - icv_len); +} + +static void count_rx(struct net_device *dev, int len) +{ + struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats); + + 
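+	/* Per-CPU software rx counters for frames delivered through the
+	 * MACsec device.
+	 */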
u64_stats_update_begin(&stats->syncp); + stats->rx_packets++; + stats->rx_bytes += len; + u64_stats_update_end(&stats->syncp); +} + +static void macsec_decrypt_done(struct crypto_async_request *base, int err) +{ + struct sk_buff *skb = base->data; + struct net_device *dev = skb->dev; + struct macsec_dev *macsec = macsec_priv(dev); + struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa; + int len, ret; + u32 pn; + + aead_request_free(macsec_skb_cb(skb)->req); + + rcu_read_lock_bh(); + pn = ntohl(macsec_ethhdr(skb)->packet_number); + if (!macsec_post_decrypt(skb, &macsec->secy, pn)) { + rcu_read_unlock_bh(); + kfree_skb(skb); + goto out; + } + + macsec_finalize_skb(skb, macsec->secy.icv_len, + macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + macsec_reset_skb(skb, macsec->secy.netdev); + + len = skb->len; + ret = netif_rx(skb); + if (ret == NET_RX_SUCCESS) + count_rx(dev, len); + else + macsec->secy.netdev->stats.rx_dropped++; + + rcu_read_unlock_bh(); + +out: + macsec_rxsa_put(rx_sa); + dev_put(dev); + return; +} + +static struct sk_buff *macsec_decrypt(struct sk_buff *skb, + struct net_device *dev, + struct macsec_rx_sa *rx_sa, + sci_t sci, + struct macsec_secy *secy) +{ + int ret; + struct scatterlist sg[MAX_SKB_FRAGS + 1]; + unsigned char iv[GCM_AES_IV_LEN]; + struct aead_request *req; + struct macsec_eth_header *hdr; + u16 icv_len = secy->icv_len; + + macsec_skb_cb(skb)->valid = false; + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + return NULL; + + req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC); + if (!req) { + kfree_skb(skb); + return NULL; + } + + hdr = (struct macsec_eth_header *)skb->data; + macsec_fill_iv(iv, sci, ntohl(hdr->packet_number)); + + sg_init_table(sg, MAX_SKB_FRAGS + 1); + skb_to_sgvec(skb, sg, 0, skb->len); + + if (hdr->tci_an & MACSEC_TCI_E) { + /* confidentiality: ethernet + macsec header + * authenticated, encrypted payload + */ + int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci); + + aead_request_set_crypt(req, sg, sg, len, iv); + aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci)); + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) { + aead_request_free(req); + return NULL; + } + } else { + /* integrity only: all headers + data authenticated */ + aead_request_set_crypt(req, sg, sg, icv_len, iv); + aead_request_set_ad(req, skb->len - icv_len); + } + + macsec_skb_cb(skb)->req = req; + macsec_skb_cb(skb)->rx_sa = rx_sa; + skb->dev = dev; + aead_request_set_callback(req, 0, macsec_decrypt_done, skb); + + dev_hold(dev); + ret = crypto_aead_decrypt(req); + if (ret == -EINPROGRESS) { + return NULL; + } else if (ret != 0) { + /* decryption/authentication failed + * 10.6 if validateFrames is disabled, deliver anyway + */ + if (ret != -EBADMSG) { + kfree_skb(skb); + skb = NULL; + } + } else { + macsec_skb_cb(skb)->valid = true; + } + dev_put(dev); + + aead_request_free(req); + + return skb; +} + +static struct macsec_rx_sc *find_rx_sc(struct macsec_secy *secy, sci_t sci) +{ + struct macsec_rx_sc *rx_sc; + + for_each_rxsc(secy, rx_sc) { + if (rx_sc->sci == sci) + return rx_sc; + } + + return NULL; +} + +static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci) +{ + struct macsec_rx_sc *rx_sc; + + for_each_rxsc_rtnl(secy, rx_sc) { + if (rx_sc->sci == sci) + return rx_sc; + } + + return NULL; +} + +static void handle_not_macsec(struct sk_buff *skb) +{ + struct macsec_rxh_data *rxd; + struct macsec_dev *macsec; + + rcu_read_lock(); + rxd = macsec_data_rcu(skb->dev); + + /* 10.6 If the management control 
validateFrames is not + * Strict, frames without a SecTAG are received, counted, and + * delivered to the Controlled Port + */ + list_for_each_entry_rcu(macsec, &rxd->secys, secys) { + struct sk_buff *nskb; + int ret; + struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats); + + if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { + u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.InPktsNoTag++; + u64_stats_update_end(&secy_stats->syncp); + continue; + } + + /* deliver on this port */ + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + break; + + nskb->dev = macsec->secy.netdev; + + ret = netif_rx(nskb); + if (ret == NET_RX_SUCCESS) { + u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.InPktsUntagged++; + u64_stats_update_end(&secy_stats->syncp); + } else { + macsec->secy.netdev->stats.rx_dropped++; + } + } + + rcu_read_unlock(); +} + +static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct net_device *dev = skb->dev; + struct macsec_eth_header *hdr; + struct macsec_secy *secy = NULL; + struct macsec_rx_sc *rx_sc; + struct macsec_rx_sa *rx_sa; + struct macsec_rxh_data *rxd; + struct macsec_dev *macsec; + sci_t sci; + u32 pn; + bool cbit; + struct pcpu_rx_sc_stats *rxsc_stats; + struct pcpu_secy_stats *secy_stats; + bool pulled_sci; + + if (skb_headroom(skb) < ETH_HLEN) + goto drop_direct; + + hdr = macsec_ethhdr(skb); + if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) { + handle_not_macsec(skb); + + /* and deliver to the uncontrolled port */ + return RX_HANDLER_PASS; + } + + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) { + *pskb = NULL; + return RX_HANDLER_CONSUMED; + } + + pulled_sci = pskb_may_pull(skb, macsec_extra_len(true)); + if (!pulled_sci) { + if (!pskb_may_pull(skb, macsec_extra_len(false))) + goto drop_direct; + } + + hdr = macsec_ethhdr(skb); + + /* Frames with a SecTAG that has the TCI E bit set but the C + * bit clear are discarded, as this reserved encoding is used + * to identify frames with a SecTAG that are not to be + * delivered to the Controlled Port. 
+ */ + if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E) + return RX_HANDLER_PASS; + + /* now, pull the extra length */ + if (hdr->tci_an & MACSEC_TCI_SC) { + if (!pulled_sci) + goto drop_direct; + } + + /* ethernet header is part of crypto processing */ + skb_push(skb, ETH_HLEN); + + macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC); + macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK; + sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci); + + rcu_read_lock(); + rxd = macsec_data_rcu(skb->dev); + + list_for_each_entry_rcu(macsec, &rxd->secys, secys) { + struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci); + + if (sc) { + secy = &macsec->secy; + rx_sc = sc; + break; + } + } + + if (!secy) + goto nosci; + + dev = secy->netdev; + macsec = macsec_priv(dev); + secy_stats = this_cpu_ptr(macsec->stats); + rxsc_stats = this_cpu_ptr(rx_sc->stats); + + if (!macsec_validate_skb(skb, secy->icv_len)) { + u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.InPktsBadTag++; + u64_stats_update_end(&secy_stats->syncp); + goto drop_nosa; + } + + rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]); + if (!rx_sa) { + /* 10.6.1 if the SA is not in use */ + + /* If validateFrames is Strict or the C bit in the + * SecTAG is set, discard + */ + if (hdr->tci_an & MACSEC_TCI_C || + secy->validate_frames == MACSEC_VALIDATE_STRICT) { + u64_stats_update_begin(&rxsc_stats->syncp); + rxsc_stats->stats.InPktsNotUsingSA++; + u64_stats_update_end(&rxsc_stats->syncp); + goto drop_nosa; + } + + /* not Strict, the frame (with the SecTAG and ICV + * removed) is delivered to the Controlled Port. + */ + u64_stats_update_begin(&rxsc_stats->syncp); + rxsc_stats->stats.InPktsUnusedSA++; + u64_stats_update_end(&rxsc_stats->syncp); + goto deliver; + } + + /* First, PN check to avoid decrypting obviously wrong packets */ + pn = ntohl(hdr->packet_number); + if (secy->replay_protect) { + bool late; + + spin_lock(&rx_sa->lock); + late = rx_sa->next_pn >= secy->replay_window && + pn < (rx_sa->next_pn - secy->replay_window); + spin_unlock(&rx_sa->lock); + + if (late) { + u64_stats_update_begin(&rxsc_stats->syncp); + rxsc_stats->stats.InPktsLate++; + u64_stats_update_end(&rxsc_stats->syncp); + goto drop; + } + } + + /* Disabled && !changed text => skip validation */ + if (hdr->tci_an & MACSEC_TCI_C || + secy->validate_frames != MACSEC_VALIDATE_DISABLED) + skb = macsec_decrypt(skb, dev, rx_sa, sci, secy); + + if (!skb) { + macsec_rxsa_put(rx_sa); + rcu_read_unlock(); + *pskb = NULL; + return RX_HANDLER_CONSUMED; + } + + if (!macsec_post_decrypt(skb, secy, pn)) + goto drop; + +deliver: + macsec_finalize_skb(skb, secy->icv_len, + macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + macsec_reset_skb(skb, secy->netdev); + + macsec_rxsa_put(rx_sa); + count_rx(dev, skb->len); + + rcu_read_unlock(); + + *pskb = skb; + return RX_HANDLER_ANOTHER; + +drop: + macsec_rxsa_put(rx_sa); +drop_nosa: + rcu_read_unlock(); +drop_direct: + kfree_skb(skb); + *pskb = NULL; + return RX_HANDLER_CONSUMED; + +nosci: + /* 10.6.1 if the SC is not found */ + cbit = !!(hdr->tci_an & MACSEC_TCI_C); + if (!cbit) + macsec_finalize_skb(skb, DEFAULT_ICV_LEN, + macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + + list_for_each_entry_rcu(macsec, &rxd->secys, secys) { + struct sk_buff *nskb; + int ret; + + secy_stats = this_cpu_ptr(macsec->stats); + + /* If validateFrames is Strict or the C bit in the + * SecTAG is set, discard + */ + if (cbit || + macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) { + 
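+			/* account the dropped frame as InPktsNoSCI on this SecY */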
u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.InPktsNoSCI++; + u64_stats_update_end(&secy_stats->syncp); + continue; + } + + /* not strict, the frame (with the SecTAG and ICV + * removed) is delivered to the Controlled Port. + */ + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + break; + + macsec_reset_skb(nskb, macsec->secy.netdev); + + ret = netif_rx(nskb); + if (ret == NET_RX_SUCCESS) { + u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.InPktsUnknownSCI++; + u64_stats_update_end(&secy_stats->syncp); + } else { + macsec->secy.netdev->stats.rx_dropped++; + } + } + + rcu_read_unlock(); + *pskb = skb; + return RX_HANDLER_PASS; +} + +static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len) +{ + struct crypto_aead *tfm; + int ret; + + tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC); + if (!tfm || IS_ERR(tfm)) + return NULL; + + ret = crypto_aead_setkey(tfm, key, key_len); + if (ret < 0) { + crypto_free_aead(tfm); + return NULL; + } + + ret = crypto_aead_setauthsize(tfm, icv_len); + if (ret < 0) { + crypto_free_aead(tfm); + return NULL; + } + + return tfm; +} + +static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len, + int icv_len) +{ + rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats); + if (!rx_sa->stats) + return -1; + + rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len); + if (!rx_sa->key.tfm) { + free_percpu(rx_sa->stats); + return -1; + } + + rx_sa->active = false; + rx_sa->next_pn = 1; + atomic_set(&rx_sa->refcnt, 1); + spin_lock_init(&rx_sa->lock); + + return 0; +} + +static void clear_rx_sa(struct macsec_rx_sa *rx_sa) +{ + rx_sa->active = false; + + macsec_rxsa_put(rx_sa); +} + +static void free_rx_sc(struct macsec_rx_sc *rx_sc) +{ + int i; + + for (i = 0; i < MACSEC_NUM_AN; i++) { + struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]); + + RCU_INIT_POINTER(rx_sc->sa[i], NULL); + if (sa) + clear_rx_sa(sa); + } + + macsec_rxsc_put(rx_sc); +} + +static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) +{ + struct macsec_rx_sc *rx_sc, __rcu **rx_scp; + + for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp); + rx_sc; + rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) { + if (rx_sc->sci == sci) { + if (rx_sc->active) + secy->n_rx_sc--; + rcu_assign_pointer(*rx_scp, rx_sc->next); + return rx_sc; + } + } + + return NULL; +} + +static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) +{ + struct macsec_rx_sc *rx_sc; + struct macsec_dev *macsec; + struct net_device *real_dev = macsec_priv(dev)->real_dev; + struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); + struct macsec_secy *secy; + + list_for_each_entry(macsec, &rxd->secys, secys) { + if (find_rx_sc_rtnl(&macsec->secy, sci)) + return ERR_PTR(-EEXIST); + } + + rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL); + if (!rx_sc) + return ERR_PTR(-ENOMEM); + + rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats); + if (!rx_sc->stats) { + kfree(rx_sc); + return ERR_PTR(-ENOMEM); + } + + rx_sc->sci = sci; + rx_sc->active = true; + atomic_set(&rx_sc->refcnt, 1); + + secy = &macsec_priv(dev)->secy; + rcu_assign_pointer(rx_sc->next, secy->rx_sc); + rcu_assign_pointer(secy->rx_sc, rx_sc); + + if (rx_sc->active) + secy->n_rx_sc++; + + return rx_sc; +} + +static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len, + int icv_len) +{ + tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats); + if (!tx_sa->stats) + return -1; + + tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, 
icv_len); + if (!tx_sa->key.tfm) { + free_percpu(tx_sa->stats); + return -1; + } + + tx_sa->active = false; + atomic_set(&tx_sa->refcnt, 1); + spin_lock_init(&tx_sa->lock); + + return 0; +} + +static void clear_tx_sa(struct macsec_tx_sa *tx_sa) +{ + tx_sa->active = false; + + macsec_txsa_put(tx_sa); +} + +static struct genl_family macsec_fam = { + .id = GENL_ID_GENERATE, + .name = MACSEC_GENL_NAME, + .hdrsize = 0, + .version = MACSEC_GENL_VERSION, + .maxattr = MACSEC_ATTR_MAX, + .netnsok = true, +}; + +static struct net_device *get_dev_from_nl(struct net *net, + struct nlattr **attrs) +{ + int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]); + struct net_device *dev; + + dev = __dev_get_by_index(net, ifindex); + if (!dev) + return ERR_PTR(-ENODEV); + + if (!netif_is_macsec(dev)) + return ERR_PTR(-ENODEV); + + return dev; +} + +static sci_t nla_get_sci(const struct nlattr *nla) +{ + return (__force sci_t)nla_get_u64(nla); +} + +static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value) +{ + return nla_put_u64(skb, attrtype, (__force u64)value); +} + +static struct macsec_tx_sa *get_txsa_from_nl(struct net *net, + struct nlattr **attrs, + struct nlattr **tb_sa, + struct net_device **devp, + struct macsec_secy **secyp, + struct macsec_tx_sc **scp, + u8 *assoc_num) +{ + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_tx_sc *tx_sc; + struct macsec_tx_sa *tx_sa; + + if (!tb_sa[MACSEC_SA_ATTR_AN]) + return ERR_PTR(-EINVAL); + + *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); + + dev = get_dev_from_nl(net, attrs); + if (IS_ERR(dev)) + return ERR_CAST(dev); + + if (*assoc_num >= MACSEC_NUM_AN) + return ERR_PTR(-EINVAL); + + secy = &macsec_priv(dev)->secy; + tx_sc = &secy->tx_sc; + + tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]); + if (!tx_sa) + return ERR_PTR(-ENODEV); + + *devp = dev; + *scp = tx_sc; + *secyp = secy; + return tx_sa; +} + +static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net, + struct nlattr **attrs, + struct nlattr **tb_rxsc, + struct net_device **devp, + struct macsec_secy **secyp) +{ + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + sci_t sci; + + dev = get_dev_from_nl(net, attrs); + if (IS_ERR(dev)) + return ERR_CAST(dev); + + secy = &macsec_priv(dev)->secy; + + if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) + return ERR_PTR(-EINVAL); + + sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); + rx_sc = find_rx_sc_rtnl(secy, sci); + if (!rx_sc) + return ERR_PTR(-ENODEV); + + *secyp = secy; + *devp = dev; + + return rx_sc; +} + +static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net, + struct nlattr **attrs, + struct nlattr **tb_rxsc, + struct nlattr **tb_sa, + struct net_device **devp, + struct macsec_secy **secyp, + struct macsec_rx_sc **scp, + u8 *assoc_num) +{ + struct macsec_rx_sc *rx_sc; + struct macsec_rx_sa *rx_sa; + + if (!tb_sa[MACSEC_SA_ATTR_AN]) + return ERR_PTR(-EINVAL); + + *assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); + if (*assoc_num >= MACSEC_NUM_AN) + return ERR_PTR(-EINVAL); + + rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp); + if (IS_ERR(rx_sc)) + return ERR_CAST(rx_sc); + + rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]); + if (!rx_sa) + return ERR_PTR(-ENODEV); + + *scp = rx_sc; + return rx_sa; +} + + +static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = { + [MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 }, + [MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED }, + [MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED }, +}; + +static const struct nla_policy 
macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = { + [MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 }, + [MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 }, +}; + +static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = { + [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 }, + [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 }, + [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 }, + [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 }, + [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY, + .len = MACSEC_MAX_KEY_LEN, }, +}; + +static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa) +{ + if (!attrs[MACSEC_ATTR_SA_CONFIG]) + return -EINVAL; + + if (nla_parse_nested(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], + macsec_genl_sa_policy)) + return -EINVAL; + + return 0; +} + +static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc) +{ + if (!attrs[MACSEC_ATTR_RXSC_CONFIG]) + return -EINVAL; + + if (nla_parse_nested(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], + macsec_genl_rxsc_policy)) + return -EINVAL; + + return 0; +} + +static bool validate_add_rxsa(struct nlattr **attrs) +{ + if (!attrs[MACSEC_SA_ATTR_AN] || + !attrs[MACSEC_SA_ATTR_KEY] || + !attrs[MACSEC_SA_ATTR_KEYID]) + return false; + + if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) + return false; + + if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + return false; + + if (attrs[MACSEC_SA_ATTR_ACTIVE]) { + if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) + return false; + } + + return true; +} + +static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) +{ + struct net_device *dev; + struct nlattr **attrs = info->attrs; + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + struct macsec_rx_sa *rx_sa; + unsigned char assoc_num; + struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_sa_config(attrs, tb_sa)) + return -EINVAL; + + if (parse_rxsc_config(attrs, tb_rxsc)) + return -EINVAL; + + if (!validate_add_rxsa(tb_sa)) + return -EINVAL; + + rtnl_lock(); + rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); + if (IS_ERR(rx_sc) || !macsec_rxsc_get(rx_sc)) { + rtnl_unlock(); + return PTR_ERR(rx_sc); + } + + assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); + + if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { + pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n", + nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); + rtnl_unlock(); + return -EINVAL; + } + + rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]); + if (rx_sa) { + rtnl_unlock(); + return -EBUSY; + } + + rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL); + if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len, + secy->icv_len)) { + rtnl_unlock(); + return -ENOMEM; + } + + if (tb_sa[MACSEC_SA_ATTR_PN]) { + spin_lock_bh(&rx_sa->lock); + rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&rx_sa->lock); + } + + if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) + rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + + rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); + rx_sa->sc = rx_sc; + rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa); + + rtnl_unlock(); + + return 0; +} + +static bool validate_add_rxsc(struct nlattr **attrs) +{ + if (!attrs[MACSEC_RXSC_ATTR_SCI]) + return false; + + if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) { + if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1) + return false; + } + + return 
true; +} + +static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) +{ + struct net_device *dev; + sci_t sci = MACSEC_UNDEF_SCI; + struct nlattr **attrs = info->attrs; + struct macsec_rx_sc *rx_sc; + struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_rxsc_config(attrs, tb_rxsc)) + return -EINVAL; + + if (!validate_add_rxsc(tb_rxsc)) + return -EINVAL; + + rtnl_lock(); + dev = get_dev_from_nl(genl_info_net(info), attrs); + if (IS_ERR(dev)) { + rtnl_unlock(); + return PTR_ERR(dev); + } + + sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); + + rx_sc = create_rx_sc(dev, sci); + if (IS_ERR(rx_sc)) { + rtnl_unlock(); + return PTR_ERR(rx_sc); + } + + if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) + rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + + rtnl_unlock(); + + return 0; +} + +static bool validate_add_txsa(struct nlattr **attrs) +{ + if (!attrs[MACSEC_SA_ATTR_AN] || + !attrs[MACSEC_SA_ATTR_PN] || + !attrs[MACSEC_SA_ATTR_KEY] || + !attrs[MACSEC_SA_ATTR_KEYID]) + return false; + + if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) + return false; + + if (nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + return false; + + if (attrs[MACSEC_SA_ATTR_ACTIVE]) { + if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) + return false; + } + + return true; +} + +static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) +{ + struct net_device *dev; + struct nlattr **attrs = info->attrs; + struct macsec_secy *secy; + struct macsec_tx_sc *tx_sc; + struct macsec_tx_sa *tx_sa; + unsigned char assoc_num; + struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_sa_config(attrs, tb_sa)) + return -EINVAL; + + if (!validate_add_txsa(tb_sa)) + return -EINVAL; + + rtnl_lock(); + dev = get_dev_from_nl(genl_info_net(info), attrs); + if (IS_ERR(dev)) { + rtnl_unlock(); + return PTR_ERR(dev); + } + + secy = &macsec_priv(dev)->secy; + tx_sc = &secy->tx_sc; + + assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]); + + if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) { + pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n", + nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len); + rtnl_unlock(); + return -EINVAL; + } + + tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]); + if (tx_sa) { + rtnl_unlock(); + return -EBUSY; + } + + tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL); + if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), + secy->key_len, secy->icv_len)) { + rtnl_unlock(); + return -ENOMEM; + } + + tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]); + + spin_lock_bh(&tx_sa->lock); + tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&tx_sa->lock); + + if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) + tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + + if (assoc_num == tx_sc->encoding_sa && tx_sa->active) + secy->operational = true; + + rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa); + + rtnl_unlock(); + + return 0; +} + +static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + struct macsec_rx_sa *rx_sa; + u8 assoc_num; + struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_sa_config(attrs, tb_sa)) + return -EINVAL; + + if (parse_rxsc_config(attrs, 
tb_rxsc)) + return -EINVAL; + + rtnl_lock(); + rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, + &dev, &secy, &rx_sc, &assoc_num); + if (IS_ERR(rx_sa)) { + rtnl_unlock(); + return PTR_ERR(rx_sa); + } + + if (rx_sa->active) { + rtnl_unlock(); + return -EBUSY; + } + + RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL); + clear_rx_sa(rx_sa); + + rtnl_unlock(); + + return 0; +} + +static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + sci_t sci; + struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_rxsc_config(attrs, tb_rxsc)) + return -EINVAL; + + if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI]) + return -EINVAL; + + rtnl_lock(); + dev = get_dev_from_nl(genl_info_net(info), info->attrs); + if (IS_ERR(dev)) { + rtnl_unlock(); + return PTR_ERR(dev); + } + + secy = &macsec_priv(dev)->secy; + sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); + + rx_sc = del_rx_sc(secy, sci); + if (!rx_sc) { + rtnl_unlock(); + return -ENODEV; + } + + free_rx_sc(rx_sc); + rtnl_unlock(); + + return 0; +} + +static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_tx_sc *tx_sc; + struct macsec_tx_sa *tx_sa; + u8 assoc_num; + struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_sa_config(attrs, tb_sa)) + return -EINVAL; + + rtnl_lock(); + tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, + &dev, &secy, &tx_sc, &assoc_num); + if (IS_ERR(tx_sa)) { + rtnl_unlock(); + return PTR_ERR(tx_sa); + } + + if (tx_sa->active) { + rtnl_unlock(); + return -EBUSY; + } + + RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL); + clear_tx_sa(tx_sa); + + rtnl_unlock(); + + return 0; +} + +static bool validate_upd_sa(struct nlattr **attrs) +{ + if (!attrs[MACSEC_SA_ATTR_AN] || + attrs[MACSEC_SA_ATTR_KEY] || + attrs[MACSEC_SA_ATTR_KEYID]) + return false; + + if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN) + return false; + + if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u32(attrs[MACSEC_SA_ATTR_PN]) == 0) + return false; + + if (attrs[MACSEC_SA_ATTR_ACTIVE]) { + if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1) + return false; + } + + return true; +} + +static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_tx_sc *tx_sc; + struct macsec_tx_sa *tx_sa; + u8 assoc_num; + struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_sa_config(attrs, tb_sa)) + return -EINVAL; + + if (!validate_upd_sa(tb_sa)) + return -EINVAL; + + rtnl_lock(); + tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa, + &dev, &secy, &tx_sc, &assoc_num); + if (IS_ERR(tx_sa)) { + rtnl_unlock(); + return PTR_ERR(tx_sa); + } + + if (tb_sa[MACSEC_SA_ATTR_PN]) { + spin_lock_bh(&tx_sa->lock); + tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&tx_sa->lock); + } + + if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) + tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + + if (assoc_num == tx_sc->encoding_sa) + secy->operational = tx_sa->active; + + rtnl_unlock(); + + return 0; +} + +static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr 
**attrs = info->attrs; + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + struct macsec_rx_sa *rx_sa; + u8 assoc_num; + struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_rxsc_config(attrs, tb_rxsc)) + return -EINVAL; + + if (parse_sa_config(attrs, tb_sa)) + return -EINVAL; + + if (!validate_upd_sa(tb_sa)) + return -EINVAL; + + rtnl_lock(); + rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa, + &dev, &secy, &rx_sc, &assoc_num); + if (IS_ERR(rx_sa)) { + rtnl_unlock(); + return PTR_ERR(rx_sa); + } + + if (tb_sa[MACSEC_SA_ATTR_PN]) { + spin_lock_bh(&rx_sa->lock); + rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]); + spin_unlock_bh(&rx_sa->lock); + } + + if (tb_sa[MACSEC_SA_ATTR_ACTIVE]) + rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]); + + rtnl_unlock(); + return 0; +} + +static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info) +{ + struct nlattr **attrs = info->attrs; + struct net_device *dev; + struct macsec_secy *secy; + struct macsec_rx_sc *rx_sc; + struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; + + if (!attrs[MACSEC_ATTR_IFINDEX]) + return -EINVAL; + + if (parse_rxsc_config(attrs, tb_rxsc)) + return -EINVAL; + + if (!validate_add_rxsc(tb_rxsc)) + return -EINVAL; + + rtnl_lock(); + rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy); + if (IS_ERR(rx_sc)) { + rtnl_unlock(); + return PTR_ERR(rx_sc); + } + + if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) { + bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + + if (rx_sc->active != new) + secy->n_rx_sc += new ? 1 : -1; + + rx_sc->active = new; + } + + rtnl_unlock(); + + return 0; +} + +static int copy_tx_sa_stats(struct sk_buff *skb, + struct macsec_tx_sa_stats __percpu *pstats) +{ + struct macsec_tx_sa_stats sum = {0, }; + int cpu; + + for_each_possible_cpu(cpu) { + const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu); + + sum.OutPktsProtected += stats->OutPktsProtected; + sum.OutPktsEncrypted += stats->OutPktsEncrypted; + } + + if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted)) + return -EMSGSIZE; + + return 0; +} + +static int copy_rx_sa_stats(struct sk_buff *skb, + struct macsec_rx_sa_stats __percpu *pstats) +{ + struct macsec_rx_sa_stats sum = {0, }; + int cpu; + + for_each_possible_cpu(cpu) { + const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu); + + sum.InPktsOK += stats->InPktsOK; + sum.InPktsInvalid += stats->InPktsInvalid; + sum.InPktsNotValid += stats->InPktsNotValid; + sum.InPktsNotUsingSA += stats->InPktsNotUsingSA; + sum.InPktsUnusedSA += stats->InPktsUnusedSA; + } + + if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || + nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) + return -EMSGSIZE; + + return 0; +} + +static int copy_rx_sc_stats(struct sk_buff *skb, + struct pcpu_rx_sc_stats __percpu *pstats) +{ + struct macsec_rx_sc_stats sum = {0, }; + int cpu; + + for_each_possible_cpu(cpu) { + const struct pcpu_rx_sc_stats *stats; + struct macsec_rx_sc_stats tmp; + unsigned int 
start; + + stats = per_cpu_ptr(pstats, cpu); + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + memcpy(&tmp, &stats->stats, sizeof(tmp)); + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + sum.InOctetsValidated += tmp.InOctetsValidated; + sum.InOctetsDecrypted += tmp.InOctetsDecrypted; + sum.InPktsUnchecked += tmp.InPktsUnchecked; + sum.InPktsDelayed += tmp.InPktsDelayed; + sum.InPktsOK += tmp.InPktsOK; + sum.InPktsInvalid += tmp.InPktsInvalid; + sum.InPktsLate += tmp.InPktsLate; + sum.InPktsNotValid += tmp.InPktsNotValid; + sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA; + sum.InPktsUnusedSA += tmp.InPktsUnusedSA; + } + + if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) || + nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA)) + return -EMSGSIZE; + + return 0; +} + +static int copy_tx_sc_stats(struct sk_buff *skb, + struct pcpu_tx_sc_stats __percpu *pstats) +{ + struct macsec_tx_sc_stats sum = {0, }; + int cpu; + + for_each_possible_cpu(cpu) { + const struct pcpu_tx_sc_stats *stats; + struct macsec_tx_sc_stats tmp; + unsigned int start; + + stats = per_cpu_ptr(pstats, cpu); + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + memcpy(&tmp, &stats->stats, sizeof(tmp)); + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + sum.OutPktsProtected += tmp.OutPktsProtected; + sum.OutPktsEncrypted += tmp.OutPktsEncrypted; + sum.OutOctetsProtected += tmp.OutOctetsProtected; + sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted; + } + + if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) || + nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) || + nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) || + nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted)) + return -EMSGSIZE; + + return 0; +} + +static int copy_secy_stats(struct sk_buff *skb, + struct pcpu_secy_stats __percpu *pstats) +{ + struct macsec_dev_stats sum = {0, }; + int cpu; + + for_each_possible_cpu(cpu) { + const struct pcpu_secy_stats *stats; + struct macsec_dev_stats tmp; + unsigned int start; + + stats = per_cpu_ptr(pstats, cpu); + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + memcpy(&tmp, &stats->stats, sizeof(tmp)); + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + sum.OutPktsUntagged += tmp.OutPktsUntagged; + sum.InPktsUntagged += tmp.InPktsUntagged; + sum.OutPktsTooLong += tmp.OutPktsTooLong; + sum.InPktsNoTag += tmp.InPktsNoTag; + sum.InPktsBadTag += tmp.InPktsBadTag; + sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI; + sum.InPktsNoSCI += tmp.InPktsNoSCI; + sum.InPktsOverrun += tmp.InPktsOverrun; + } + + if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) || + nla_put_u64(skb, 
MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) || + nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) || + nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) || + nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) || + nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) || + nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) || + nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun)) + return -EMSGSIZE; + + return 0; +} + +static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb) +{ + struct macsec_tx_sc *tx_sc = &secy->tx_sc; + struct nlattr *secy_nest = nla_nest_start(skb, MACSEC_ATTR_SECY); + + if (!secy_nest) + return 1; + + if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) || + nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) || + nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) || + nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) || + nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) || + nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) || + nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) || + nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) || + nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) || + nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) || + nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) || + nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa)) + goto cancel; + + if (secy->replay_protect) { + if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window)) + goto cancel; + } + + nla_nest_end(skb, secy_nest); + return 0; + +cancel: + nla_nest_cancel(skb, secy_nest); + return 1; +} + +static int dump_secy(struct macsec_secy *secy, struct net_device *dev, + struct sk_buff *skb, struct netlink_callback *cb) +{ + struct macsec_rx_sc *rx_sc; + struct macsec_tx_sc *tx_sc = &secy->tx_sc; + struct nlattr *txsa_list, *rxsc_list; + int i, j; + void *hdr; + struct nlattr *attr; + + hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, + &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC); + if (!hdr) + return -EMSGSIZE; + + rtnl_lock(); + + if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex)) + goto nla_put_failure; + + if (nla_put_secy(secy, skb)) + goto nla_put_failure; + + attr = nla_nest_start(skb, MACSEC_ATTR_TXSC_STATS); + if (!attr) + goto nla_put_failure; + if (copy_tx_sc_stats(skb, tx_sc->stats)) { + nla_nest_cancel(skb, attr); + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + attr = nla_nest_start(skb, MACSEC_ATTR_SECY_STATS); + if (!attr) + goto nla_put_failure; + if (copy_secy_stats(skb, macsec_priv(dev)->stats)) { + nla_nest_cancel(skb, attr); + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + txsa_list = nla_nest_start(skb, MACSEC_ATTR_TXSA_LIST); + if (!txsa_list) + goto nla_put_failure; + for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) { + struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]); + struct nlattr *txsa_nest; + + if (!tx_sa) + continue; + + txsa_nest = nla_nest_start(skb, j++); + if (!txsa_nest) { + nla_nest_cancel(skb, txsa_list); + goto nla_put_failure; + } + + if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || + nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) || + nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) || + nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) { + nla_nest_cancel(skb, txsa_nest); + 
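+			/* undo the partially written SA nest and list before failing the dump */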
nla_nest_cancel(skb, txsa_list); + goto nla_put_failure; + } + + attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); + if (!attr) { + nla_nest_cancel(skb, txsa_nest); + nla_nest_cancel(skb, txsa_list); + goto nla_put_failure; + } + if (copy_tx_sa_stats(skb, tx_sa->stats)) { + nla_nest_cancel(skb, attr); + nla_nest_cancel(skb, txsa_nest); + nla_nest_cancel(skb, txsa_list); + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + nla_nest_end(skb, txsa_nest); + } + nla_nest_end(skb, txsa_list); + + rxsc_list = nla_nest_start(skb, MACSEC_ATTR_RXSC_LIST); + if (!rxsc_list) + goto nla_put_failure; + + j = 1; + for_each_rxsc_rtnl(secy, rx_sc) { + int k; + struct nlattr *rxsa_list; + struct nlattr *rxsc_nest = nla_nest_start(skb, j++); + + if (!rxsc_nest) { + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + + if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) || + nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) { + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + + attr = nla_nest_start(skb, MACSEC_RXSC_ATTR_STATS); + if (!attr) { + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + if (copy_rx_sc_stats(skb, rx_sc->stats)) { + nla_nest_cancel(skb, attr); + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + rxsa_list = nla_nest_start(skb, MACSEC_RXSC_ATTR_SA_LIST); + if (!rxsa_list) { + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + + for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) { + struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]); + struct nlattr *rxsa_nest; + + if (!rx_sa) + continue; + + rxsa_nest = nla_nest_start(skb, k++); + if (!rxsa_nest) { + nla_nest_cancel(skb, rxsa_list); + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + + attr = nla_nest_start(skb, MACSEC_SA_ATTR_STATS); + if (!attr) { + nla_nest_cancel(skb, rxsa_list); + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + if (copy_rx_sa_stats(skb, rx_sa->stats)) { + nla_nest_cancel(skb, attr); + nla_nest_cancel(skb, rxsa_list); + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + nla_nest_end(skb, attr); + + if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) || + nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) || + nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) || + nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) { + nla_nest_cancel(skb, rxsa_nest); + nla_nest_cancel(skb, rxsc_nest); + nla_nest_cancel(skb, rxsc_list); + goto nla_put_failure; + } + nla_nest_end(skb, rxsa_nest); + } + + nla_nest_end(skb, rxsa_list); + nla_nest_end(skb, rxsc_nest); + } + + nla_nest_end(skb, rxsc_list); + + rtnl_unlock(); + + genlmsg_end(skb, hdr); + + return 0; + +nla_put_failure: + rtnl_unlock(); + genlmsg_cancel(skb, hdr); + return -EMSGSIZE; +} + +static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb) +{ + struct net *net = sock_net(skb->sk); + struct net_device *dev; + int dev_idx, d; + + dev_idx = cb->args[0]; + + d = 0; + for_each_netdev(net, dev) { + struct macsec_secy *secy; + + if (d < dev_idx) + goto next; + + if (!netif_is_macsec(dev)) + goto next; + + secy = &macsec_priv(dev)->secy; + if (dump_secy(secy, dev, skb, cb) < 0) + goto done; +next: + d++; + } + +done: + cb->args[0] = d; + return skb->len; +} + +static const 
struct genl_ops macsec_genl_ops[] = { + { + .cmd = MACSEC_CMD_GET_TXSC, + .dumpit = macsec_dump_txsc, + .policy = macsec_genl_policy, + }, + { + .cmd = MACSEC_CMD_ADD_RXSC, + .doit = macsec_add_rxsc, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_DEL_RXSC, + .doit = macsec_del_rxsc, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_UPD_RXSC, + .doit = macsec_upd_rxsc, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_ADD_TXSA, + .doit = macsec_add_txsa, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_DEL_TXSA, + .doit = macsec_del_txsa, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_UPD_TXSA, + .doit = macsec_upd_txsa, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_ADD_RXSA, + .doit = macsec_add_rxsa, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_DEL_RXSA, + .doit = macsec_del_rxsa, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, + { + .cmd = MACSEC_CMD_UPD_RXSA, + .doit = macsec_upd_rxsa, + .policy = macsec_genl_policy, + .flags = GENL_ADMIN_PERM, + }, +}; + +static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct macsec_dev *macsec = netdev_priv(dev); + struct macsec_secy *secy = &macsec->secy; + struct pcpu_secy_stats *secy_stats; + int ret, len; + + /* 10.5 */ + if (!secy->protect_frames) { + secy_stats = this_cpu_ptr(macsec->stats); + u64_stats_update_begin(&secy_stats->syncp); + secy_stats->stats.OutPktsUntagged++; + u64_stats_update_end(&secy_stats->syncp); + len = skb->len; + ret = dev_queue_xmit(skb); + count_tx(dev, ret, len); + return ret; + } + + if (!secy->operational) { + kfree_skb(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + skb = macsec_encrypt(skb, dev); + if (IS_ERR(skb)) { + if (PTR_ERR(skb) != -EINPROGRESS) + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); + + macsec_encrypt_finish(skb, dev); + len = skb->len; + ret = dev_queue_xmit(skb); + count_tx(dev, ret, len); + return ret; +} + +#define MACSEC_FEATURES \ + (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) +static int macsec_dev_init(struct net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + + dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; + + dev->features = real_dev->features & MACSEC_FEATURES; + dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; + + dev->needed_headroom = real_dev->needed_headroom + + MACSEC_NEEDED_HEADROOM; + dev->needed_tailroom = real_dev->needed_tailroom + + MACSEC_NEEDED_TAILROOM; + + if (is_zero_ether_addr(dev->dev_addr)) + eth_hw_addr_inherit(dev, real_dev); + if (is_zero_ether_addr(dev->broadcast)) + memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len); + + return 0; +} + +static void macsec_dev_uninit(struct net_device *dev) +{ + free_percpu(dev->tstats); +} + +static netdev_features_t macsec_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + + features &= real_dev->features & MACSEC_FEATURES; + features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; + + return features; +} + +static int macsec_dev_open(struct 
net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + int err; + + if (!(real_dev->flags & IFF_UP)) + return -ENETDOWN; + + err = dev_uc_add(real_dev, dev->dev_addr); + if (err < 0) + return err; + + if (dev->flags & IFF_ALLMULTI) { + err = dev_set_allmulti(real_dev, 1); + if (err < 0) + goto del_unicast; + } + + if (dev->flags & IFF_PROMISC) { + err = dev_set_promiscuity(real_dev, 1); + if (err < 0) + goto clear_allmulti; + } + + if (netif_carrier_ok(real_dev)) + netif_carrier_on(dev); + + return 0; +clear_allmulti: + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(real_dev, -1); +del_unicast: + dev_uc_del(real_dev, dev->dev_addr); + netif_carrier_off(dev); + return err; +} + +static int macsec_dev_stop(struct net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + + netif_carrier_off(dev); + + dev_mc_unsync(real_dev, dev); + dev_uc_unsync(real_dev, dev); + + if (dev->flags & IFF_ALLMULTI) + dev_set_allmulti(real_dev, -1); + + if (dev->flags & IFF_PROMISC) + dev_set_promiscuity(real_dev, -1); + + dev_uc_del(real_dev, dev->dev_addr); + + return 0; +} + +static void macsec_dev_change_rx_flags(struct net_device *dev, int change) +{ + struct net_device *real_dev = macsec_priv(dev)->real_dev; + + if (!(dev->flags & IFF_UP)) + return; + + if (change & IFF_ALLMULTI) + dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1); + + if (change & IFF_PROMISC) + dev_set_promiscuity(real_dev, + dev->flags & IFF_PROMISC ? 1 : -1); +} + +static void macsec_dev_set_rx_mode(struct net_device *dev) +{ + struct net_device *real_dev = macsec_priv(dev)->real_dev; + + dev_mc_sync(real_dev, dev); + dev_uc_sync(real_dev, dev); +} + +static int macsec_set_mac_address(struct net_device *dev, void *p) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + struct sockaddr *addr = p; + int err; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (!(dev->flags & IFF_UP)) + goto out; + + err = dev_uc_add(real_dev, addr->sa_data); + if (err < 0) + return err; + + dev_uc_del(real_dev, dev->dev_addr); + +out: + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; +} + +static int macsec_change_mtu(struct net_device *dev, int new_mtu) +{ + struct macsec_dev *macsec = macsec_priv(dev); + unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true); + + if (macsec->real_dev->mtu - extra < new_mtu) + return -ERANGE; + + dev->mtu = new_mtu; + + return 0; +} + +static struct rtnl_link_stats64 *macsec_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + int cpu; + + if (!dev->tstats) + return s; + + for_each_possible_cpu(cpu) { + struct pcpu_sw_netstats *stats; + struct pcpu_sw_netstats tmp; + int start; + + stats = per_cpu_ptr(dev->tstats, cpu); + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + tmp.rx_packets = stats->rx_packets; + tmp.rx_bytes = stats->rx_bytes; + tmp.tx_packets = stats->tx_packets; + tmp.tx_bytes = stats->tx_bytes; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + s->rx_packets += tmp.rx_packets; + s->rx_bytes += tmp.rx_bytes; + s->tx_packets += tmp.tx_packets; + s->tx_bytes += tmp.tx_bytes; + } + + s->rx_dropped = dev->stats.rx_dropped; + s->tx_dropped = dev->stats.tx_dropped; + + return s; +} + +static int macsec_get_iflink(const struct net_device *dev) +{ + return macsec_priv(dev)->real_dev->ifindex; +} + +static const struct net_device_ops 
macsec_netdev_ops = { + .ndo_init = macsec_dev_init, + .ndo_uninit = macsec_dev_uninit, + .ndo_open = macsec_dev_open, + .ndo_stop = macsec_dev_stop, + .ndo_fix_features = macsec_fix_features, + .ndo_change_mtu = macsec_change_mtu, + .ndo_set_rx_mode = macsec_dev_set_rx_mode, + .ndo_change_rx_flags = macsec_dev_change_rx_flags, + .ndo_set_mac_address = macsec_set_mac_address, + .ndo_start_xmit = macsec_start_xmit, + .ndo_get_stats64 = macsec_get_stats64, + .ndo_get_iflink = macsec_get_iflink, +}; + +static const struct device_type macsec_type = { + .name = "macsec", +}; + +static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = { + [IFLA_MACSEC_SCI] = { .type = NLA_U64 }, + [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 }, + [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 }, + [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 }, + [IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 }, + [IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 }, + [IFLA_MACSEC_PROTECT] = { .type = NLA_U8 }, + [IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 }, + [IFLA_MACSEC_ES] = { .type = NLA_U8 }, + [IFLA_MACSEC_SCB] = { .type = NLA_U8 }, + [IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 }, + [IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 }, +}; + +static void macsec_free_netdev(struct net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + + free_percpu(macsec->stats); + free_percpu(macsec->secy.tx_sc.stats); + + dev_put(real_dev); + free_netdev(dev); +} + +static void macsec_setup(struct net_device *dev) +{ + ether_setup(dev); + dev->tx_queue_len = 0; + dev->netdev_ops = &macsec_netdev_ops; + dev->destructor = macsec_free_netdev; + + eth_zero_addr(dev->broadcast); +} + +static void macsec_changelink_common(struct net_device *dev, + struct nlattr *data[]) +{ + struct macsec_secy *secy; + struct macsec_tx_sc *tx_sc; + + secy = &macsec_priv(dev)->secy; + tx_sc = &secy->tx_sc; + + if (data[IFLA_MACSEC_ENCODING_SA]) { + struct macsec_tx_sa *tx_sa; + + tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]); + tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]); + + secy->operational = tx_sa && tx_sa->active; + } + + if (data[IFLA_MACSEC_WINDOW]) + secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]); + + if (data[IFLA_MACSEC_ENCRYPT]) + tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]); + + if (data[IFLA_MACSEC_PROTECT]) + secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]); + + if (data[IFLA_MACSEC_INC_SCI]) + tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); + + if (data[IFLA_MACSEC_ES]) + tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]); + + if (data[IFLA_MACSEC_SCB]) + tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]); + + if (data[IFLA_MACSEC_REPLAY_PROTECT]) + secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]); + + if (data[IFLA_MACSEC_VALIDATION]) + secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]); +} + +static int macsec_changelink(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[]) +{ + if (!data) + return 0; + + if (data[IFLA_MACSEC_CIPHER_SUITE] || + data[IFLA_MACSEC_ICV_LEN] || + data[IFLA_MACSEC_SCI] || + data[IFLA_MACSEC_PORT]) + return -EINVAL; + + macsec_changelink_common(dev, data); + + return 0; +} + +static void macsec_del_dev(struct macsec_dev *macsec) +{ + int i; + + while (macsec->secy.rx_sc) { + struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc); + + rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next); + free_rx_sc(rx_sc); + } + + for 
(i = 0; i < MACSEC_NUM_AN; i++) { + struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]); + + if (sa) { + RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL); + clear_tx_sa(sa); + } + } +} + +static void macsec_dellink(struct net_device *dev, struct list_head *head) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev = macsec->real_dev; + struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); + + unregister_netdevice_queue(dev, head); + list_del_rcu(&macsec->secys); + if (list_empty(&rxd->secys)) + netdev_rx_handler_unregister(real_dev); + + macsec_del_dev(macsec); +} + +static int register_macsec_dev(struct net_device *real_dev, + struct net_device *dev) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); + + if (!rxd) { + int err; + + rxd = kmalloc(sizeof(*rxd), GFP_KERNEL); + if (!rxd) + return -ENOMEM; + + INIT_LIST_HEAD(&rxd->secys); + + err = netdev_rx_handler_register(real_dev, macsec_handle_frame, + rxd); + if (err < 0) + return err; + } + + list_add_tail_rcu(&macsec->secys, &rxd->secys); + return 0; +} + +static bool sci_exists(struct net_device *dev, sci_t sci) +{ + struct macsec_rxh_data *rxd = macsec_data_rtnl(dev); + struct macsec_dev *macsec; + + list_for_each_entry(macsec, &rxd->secys, secys) { + if (macsec->secy.sci == sci) + return true; + } + + return false; +} + +static sci_t dev_to_sci(struct net_device *dev, __be16 port) +{ + return make_sci(dev->dev_addr, port); +} + +static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct macsec_secy *secy = &macsec->secy; + + macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats); + if (!macsec->stats) + return -ENOMEM; + + secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats); + if (!secy->tx_sc.stats) { + free_percpu(macsec->stats); + return -ENOMEM; + } + + if (sci == MACSEC_UNDEF_SCI) + sci = dev_to_sci(dev, MACSEC_PORT_ES); + + secy->netdev = dev; + secy->operational = true; + secy->key_len = DEFAULT_SAK_LEN; + secy->icv_len = icv_len; + secy->validate_frames = MACSEC_VALIDATE_DEFAULT; + secy->protect_frames = true; + secy->replay_protect = false; + + secy->sci = sci; + secy->tx_sc.active = true; + secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA; + secy->tx_sc.encrypt = DEFAULT_ENCRYPT; + secy->tx_sc.send_sci = DEFAULT_SEND_SCI; + secy->tx_sc.end_station = false; + secy->tx_sc.scb = false; + + return 0; +} + +static int macsec_newlink(struct net *net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[]) +{ + struct macsec_dev *macsec = macsec_priv(dev); + struct net_device *real_dev; + int err; + sci_t sci; + u8 icv_len = DEFAULT_ICV_LEN; + rx_handler_func_t *rx_handler; + + if (!tb[IFLA_LINK]) + return -EINVAL; + real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK])); + if (!real_dev) + return -ENODEV; + + dev->priv_flags |= IFF_MACSEC; + + macsec->real_dev = real_dev; + + if (data && data[IFLA_MACSEC_ICV_LEN]) + icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); + dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true); + + rx_handler = rtnl_dereference(real_dev->rx_handler); + if (rx_handler && rx_handler != macsec_handle_frame) + return -EBUSY; + + err = register_netdevice(dev); + if (err < 0) + return err; + + /* need to be already registered so that ->init has run and + * the MAC addr is set + */ + if (data && data[IFLA_MACSEC_SCI]) + sci = nla_get_sci(data[IFLA_MACSEC_SCI]); + else if (data && 
data[IFLA_MACSEC_PORT]) + sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT])); + else + sci = dev_to_sci(dev, MACSEC_PORT_ES); + + if (rx_handler && sci_exists(real_dev, sci)) { + err = -EBUSY; + goto unregister; + } + + err = macsec_add_dev(dev, sci, icv_len); + if (err) + goto unregister; + + if (data) + macsec_changelink_common(dev, data); + + err = register_macsec_dev(real_dev, dev); + if (err < 0) + goto del_dev; + + dev_hold(real_dev); + + return 0; + +del_dev: + macsec_del_dev(macsec); +unregister: + unregister_netdevice(dev); + return err; +} + +static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[]) +{ + u64 csid = DEFAULT_CIPHER_ID; + u8 icv_len = DEFAULT_ICV_LEN; + int flag; + bool es, scb, sci; + + if (!data) + return 0; + + if (data[IFLA_MACSEC_CIPHER_SUITE]) + csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]); + + if (data[IFLA_MACSEC_ICV_LEN]) + icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); + + switch (csid) { + case DEFAULT_CIPHER_ID: + case DEFAULT_CIPHER_ALT: + if (icv_len < MACSEC_MIN_ICV_LEN || + icv_len > MACSEC_MAX_ICV_LEN) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if (data[IFLA_MACSEC_ENCODING_SA]) { + if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN) + return -EINVAL; + } + + for (flag = IFLA_MACSEC_ENCODING_SA + 1; + flag < IFLA_MACSEC_VALIDATION; + flag++) { + if (data[flag]) { + if (nla_get_u8(data[flag]) > 1) + return -EINVAL; + } + } + + es = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false; + sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false; + scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false; + + if ((sci && (scb || es)) || (scb && es)) + return -EINVAL; + + if (data[IFLA_MACSEC_VALIDATION] && + nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX) + return -EINVAL; + + if ((data[IFLA_MACSEC_PROTECT] && + nla_get_u8(data[IFLA_MACSEC_PROTECT])) && + !data[IFLA_MACSEC_WINDOW]) + return -EINVAL; + + return 0; +} + +static struct net *macsec_get_link_net(const struct net_device *dev) +{ + return dev_net(macsec_priv(dev)->real_dev); +} + +static size_t macsec_get_size(const struct net_device *dev) +{ + return 0 + + nla_total_size(8) + /* SCI */ + nla_total_size(1) + /* ICV_LEN */ + nla_total_size(8) + /* CIPHER_SUITE */ + nla_total_size(4) + /* WINDOW */ + nla_total_size(1) + /* ENCODING_SA */ + nla_total_size(1) + /* ENCRYPT */ + nla_total_size(1) + /* PROTECT */ + nla_total_size(1) + /* INC_SCI */ + nla_total_size(1) + /* ES */ + nla_total_size(1) + /* SCB */ + nla_total_size(1) + /* REPLAY_PROTECT */ + nla_total_size(1) + /* VALIDATION */ + 0; +} + +static int macsec_fill_info(struct sk_buff *skb, + const struct net_device *dev) +{ + struct macsec_secy *secy = &macsec_priv(dev)->secy; + struct macsec_tx_sc *tx_sc = &secy->tx_sc; + + if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) || + nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) || + nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) || + nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) || + nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) || + nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) || + nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) || + nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) || + nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) || + nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) || + nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) || + 0) + goto nla_put_failure; 
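/* The replay window attribute below is reported only while replay
 * protection is enabled; with replay protection off, IFLA_MACSEC_WINDOW
 * is simply omitted from the dump.
 */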
+ + if (secy->replay_protect) { + if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window)) + goto nla_put_failure; + } + + return 0; + +nla_put_failure: + return -EMSGSIZE; +} + +static struct rtnl_link_ops macsec_link_ops __read_mostly = { + .kind = "macsec", + .priv_size = sizeof(struct macsec_dev), + .maxtype = IFLA_MACSEC_MAX, + .policy = macsec_rtnl_policy, + .setup = macsec_setup, + .validate = macsec_validate_attr, + .newlink = macsec_newlink, + .changelink = macsec_changelink, + .dellink = macsec_dellink, + .get_size = macsec_get_size, + .fill_info = macsec_fill_info, + .get_link_net = macsec_get_link_net, +}; + +static bool is_macsec_master(struct net_device *dev) +{ + return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame; +} + +static int macsec_notify(struct notifier_block *this, unsigned long event, + void *ptr) +{ + struct net_device *real_dev = netdev_notifier_info_to_dev(ptr); + LIST_HEAD(head); + + if (!is_macsec_master(real_dev)) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UNREGISTER: { + struct macsec_dev *m, *n; + struct macsec_rxh_data *rxd; + + rxd = macsec_data_rtnl(real_dev); + list_for_each_entry_safe(m, n, &rxd->secys, secys) { + macsec_dellink(m->secy.netdev, &head); + } + unregister_netdevice_many(&head); + break; + } + case NETDEV_CHANGEMTU: { + struct macsec_dev *m; + struct macsec_rxh_data *rxd; + + rxd = macsec_data_rtnl(real_dev); + list_for_each_entry(m, &rxd->secys, secys) { + struct net_device *dev = m->secy.netdev; + unsigned int mtu = real_dev->mtu - (m->secy.icv_len + + macsec_extra_len(true)); + + if (dev->mtu > mtu) + dev_set_mtu(dev, mtu); + } + } + } + + return NOTIFY_OK; +} + +static struct notifier_block macsec_notifier = { + .notifier_call = macsec_notify, +}; + +static int __init macsec_init(void) +{ + int err; + + pr_info("MACsec IEEE 802.1AE\n"); + err = register_netdevice_notifier(&macsec_notifier); + if (err) + return err; + + err = rtnl_link_register(&macsec_link_ops); + if (err) + goto notifier; + + err = genl_register_family_with_ops(&macsec_fam, macsec_genl_ops); + if (err) + goto rtnl; + + return 0; + +rtnl: + rtnl_link_unregister(&macsec_link_ops); +notifier: + unregister_netdevice_notifier(&macsec_notifier); + return err; +} + +static void __exit macsec_exit(void) +{ + genl_unregister_family(&macsec_fam); + rtnl_link_unregister(&macsec_link_ops); + unregister_netdevice_notifier(&macsec_notifier); +} + +module_init(macsec_init); +module_exit(macsec_exit); + +MODULE_ALIAS_RTNL_LINK("macsec"); + +MODULE_DESCRIPTION("MACsec IEEE 802.1AE"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3 From eb43e023130b5021c1ffad4d5c84cb310ffcb0f6 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Mon, 14 Mar 2016 09:38:56 +0100 Subject: misc: sram: add optional ioremap without write combining Some SRAM users may require non-bufferable access to the memory, which is impossible, because devm_ioremap_wc() is used for setting sram->virt_base. This commit adds optional flag 'no-memory-wc', which allow to choose remap method, using DT property. Documentation is updated accordingly. Signed-off-by: Marcin Wojtas Signed-off-by: David S. 
Miller --- Documentation/devicetree/bindings/sram/sram.txt | 5 +++++ drivers/misc/sram.c | 5 ++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/sram/sram.txt b/Documentation/devicetree/bindings/sram/sram.txt index 42ee9438b771..227e3a341af1 100644 --- a/Documentation/devicetree/bindings/sram/sram.txt +++ b/Documentation/devicetree/bindings/sram/sram.txt @@ -25,6 +25,11 @@ Required properties in the sram node: - ranges : standard definition, should translate from local addresses within the sram to bus addresses +Optional properties in the sram node: + +- no-memory-wc : the flag indicating, that SRAM memory region has not to + be remapped as write combining. WC is used by default. + Required properties in the area nodes: - reg : iomem address range, relative to the SRAM range diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c index 736dae715dbf..69cdabea9c03 100644 --- a/drivers/misc/sram.c +++ b/drivers/misc/sram.c @@ -360,7 +360,10 @@ static int sram_probe(struct platform_device *pdev) return -EBUSY; } - sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size); + if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc")) + sram->virt_base = devm_ioremap(sram->dev, res->start, size); + else + sram->virt_base = devm_ioremap_wc(sram->dev, res->start, size); if (IS_ERR(sram->virt_base)) return PTR_ERR(sram->virt_base); -- cgit v1.2.3 From f2900acea8018c4525ddaa86c7f7cd8afd3f0cc4 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Mon, 14 Mar 2016 09:39:02 +0100 Subject: bus: mvebu-mbus: provide api for obtaining IO and DRAM window information This commit enables finding appropriate mbus window and obtaining its target id and attribute for given physical address in two separate routines, both for IO and DRAM windows. This functionality is needed for Armada XP/38x Network Controller's Buffer Manager and PnC configuration. [gregory.clement@free-electrons.com: Fix size test for mvebu_mbus_get_dram_win_info] Signed-off-by: Marcin Wojtas [DRAM window information reference in LKv3.10] Signed-off-by: Evan Wang Signed-off-by: Gregory CLEMENT Signed-off-by: David S. 
Miller --- drivers/bus/mvebu-mbus.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mbus.h | 3 +++ 2 files changed, 55 insertions(+) (limited to 'drivers') diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index c43c3d2baf73..c2e52864bb03 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c @@ -948,6 +948,58 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res) *res = mbus_state.pcie_io_aperture; } +int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr) +{ + const struct mbus_dram_target_info *dram; + int i; + + /* Get dram info */ + dram = mv_mbus_dram_info(); + if (!dram) { + pr_err("missing DRAM information\n"); + return -ENODEV; + } + + /* Try to find matching DRAM window for phyaddr */ + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + if (cs->base <= phyaddr && + phyaddr <= (cs->base + cs->size - 1)) { + *target = dram->mbus_dram_target_id; + *attr = cs->mbus_attr; + return 0; + } + } + + pr_err("invalid dram address 0x%x\n", phyaddr); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info); + +int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, + u8 *attr) +{ + int win; + + for (win = 0; win < mbus_state.soc->num_wins; win++) { + u64 wbase; + int enabled; + + mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase, + size, target, attr, NULL); + + if (!enabled) + continue; + + if (wbase <= phyaddr && phyaddr <= wbase + *size) + return win; + } + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info); + static __init int mvebu_mbus_debugfs_init(void) { struct mvebu_mbus_state *s = &mbus_state; diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 1f7bc630d225..ea34a867caa0 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h @@ -69,6 +69,9 @@ static inline const struct mbus_dram_target_info *mv_mbus_dram_info_nooverlap(vo int mvebu_mbus_save_cpu_target(u32 *store_addr); void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); void mvebu_mbus_get_pcie_io_aperture(struct resource *res); +int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); +int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, + u8 *attr); int mvebu_mbus_add_window_remap_by_id(unsigned int target, unsigned int attribute, phys_addr_t base, size_t size, -- cgit v1.2.3 From dc35a10f68d3781c2345d60b22103785985ca849 Mon Sep 17 00:00:00 2001 From: Marcin Wojtas Date: Mon, 14 Mar 2016 09:39:03 +0100 Subject: net: mvneta: bm: add support for hardware buffer management Buffer manager (BM) is a dedicated hardware unit that can be used by all ethernet ports of Armada XP and 38x SoC's. It allows to offload CPU on RX path by sparing DRAM access on refilling buffer pool, hardware-based filling of descriptor ring data and better memory utilization due to HW arbitration for using 'short' pools for small packets. Tests performed with A388 SoC working as a network bridge between two packet generators showed increase of maximum processed 64B packets by ~20k (~555k packets with BM enabled vs ~535 packets without BM). Also when pushing 1500B-packets with a line rate achieved, CPU load decreased from around 25% without BM to 20% with BM. BM comprise up to 4 buffer pointers' (BP) rings kept in DRAM, which are called external BP pools - BPPE. 
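For reference, the per-pool bookkeeping that the new mvneta_bm driver keeps for each BPPE can be pictured roughly as follows; this is an abridged sketch reconstructed from the fields used later in this patch, not the literal mvneta_bm.h definition:

struct mvneta_bm_pool {
        u8 id;                      /* pool number, 0..3 */
        enum mvneta_bm_type type;   /* MVNETA_BM_FREE, _LONG or _SHORT */
        int size;                   /* BPPE capacity: 128..16352, 32-aligned */
        int buf_num;                /* buffers currently sitting in the pool */
        u32 pkt_size;               /* largest packet this pool serves */
        u32 buf_size;               /* DMA buffer size derived from pkt_size */
        u32 frag_size;              /* allocation size incl. skb_shared_info */
        u8 port_map;                /* bitmap of ports attached to the pool */
        u32 *virt_addr;             /* BPPE ring in DRAM ... */
        dma_addr_t phys_addr;       /* ... and its DMA address */
};

A pool only becomes operational once a port claims it through mvneta_bm_pool_use(), which sets the type, allocates the ring and pre-fills it with buffers.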
Allocating and releasing buffer pointers (BP) to/from BPPE is performed indirectly, by write/read access to a dedicated internal SRAM where the internal BP pools (BPPI) are placed. The BM hardware controls the status of the BPPE automatically, as well as assigning proper buffers to RX descriptors. For more details please refer to the Functional Specification of the Armada XP or 38x SoC.

In order to enable support for this separate hardware block, common to all ports, a new driver has to be implemented ('mvneta_bm'). It provides the initialization sequence of the address space, clocks, registers, SRAM and empty pool structures, and also obtains optional configuration from DT (please refer to the device tree binding documentation). mvneta_bm also exposes the necessary API to the mvneta driver, as well as a dedicated structure with BM information (bm_priv), whose presence is used as a flag notifying of BM usage by a port. It has to be ensured that the mvneta_bm probe is executed prior to the ones in the ports' driver. In case BM is not used or its probe fails, mvneta falls back to using software buffer management.

The sequence executed in the mvneta_probe function is modified in order to have access to the needed resources before a possible per-port BM initialization is done. According to the port-pools mapping provided by DT, the appropriate registers are configured and the buffer pools are filled. The RX path is modified accordingly.

Because the hardware allows a wide variety of configuration options, the following assumptions are made:
* using the BM mechanisms can be selectively enabled/disabled per port, based on the DT configuration
* a 'long' pool's single buffer size is tied to the port's MTU
* using a 'long' pool is obligatory for a port and it cannot be shared
* using a 'short' pool for smaller packets is optional
* one 'short' pool can be shared among all ports

This commit enables hardware buffer management operation cooperating with the existing mvneta driver. New device tree binding documentation is added and that of mvneta is updated accordingly.

[gregory.clement@free-electrons.com: removed the suspend/resume part] Signed-off-by: Marcin Wojtas Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- .../bindings/net/marvell-armada-370-neta.txt | 19 +- .../devicetree/bindings/net/marvell-neta-bm.txt | 49 ++ drivers/net/ethernet/marvell/Kconfig | 13 + drivers/net/ethernet/marvell/Makefile | 1 + drivers/net/ethernet/marvell/mvneta.c | 507 +++++++++++++++++-- drivers/net/ethernet/marvell/mvneta_bm.c | 546 +++++++++++++++++++++ drivers/net/ethernet/marvell/mvneta_bm.h | 189 +++++++ 7 files changed, 1285 insertions(+), 39 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/marvell-neta-bm.txt create mode 100644 drivers/net/ethernet/marvell/mvneta_bm.c create mode 100644 drivers/net/ethernet/marvell/mvneta_bm.h (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index d0cb8693963b..73be8970815e 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt @@ -18,15 +18,30 @@ Optional properties: "core" for core clock and "bus" for the optional bus clock. +Optional properties (valid only for Armada XP/38x): + +- buffer-manager: a phandle to a buffer manager node.
Please refer to + Documentation/devicetree/bindings/net/marvell-neta-bm.txt +- bm,pool-long: ID of a pool, that will accept all packets of a size + higher than 'short' pool's threshold (if set) and up to MTU value. + Obligatory, when the port is supposed to use hardware + buffer management. +- bm,pool-short: ID of a pool, that will be used for accepting + packets of a size lower than given threshold. If not set, the port + will use a single 'long' pool for all packets, as defined above. + Example: -ethernet@d0070000 { +ethernet@70000 { compatible = "marvell,armada-370-neta"; - reg = <0xd0070000 0x2500>; + reg = <0x70000 0x2500>; interrupts = <8>; clocks = <&gate_clk 4>; tx-csum-limit = <9800> status = "okay"; phy = <&phy0>; phy-mode = "rgmii-id"; + buffer-manager = <&bm>; + bm,pool-long = <0>; + bm,pool-short = <1>; }; diff --git a/Documentation/devicetree/bindings/net/marvell-neta-bm.txt b/Documentation/devicetree/bindings/net/marvell-neta-bm.txt new file mode 100644 index 000000000000..c1b1d7c3bde1 --- /dev/null +++ b/Documentation/devicetree/bindings/net/marvell-neta-bm.txt @@ -0,0 +1,49 @@ +* Marvell Armada 380/XP Buffer Manager driver (BM) + +Required properties: + +- compatible: should be "marvell,armada-380-neta-bm". +- reg: address and length of the register set for the device. +- clocks: a pointer to the reference clock for this device. +- internal-mem: a phandle to BM internal SRAM definition. + +Optional properties (port): + +- pool<0 : 3>,capacity: size of external buffer pointers' ring maintained + in DRAM. Can be set for each pool (id 0 : 3) separately. The value has + to be chosen between 128 and 16352 and it also has to be aligned to 32. + Otherwise the driver would adjust a given number or choose default if + not set. +- pool<0 : 3>,pkt-size: maximum size of a packet accepted by a given buffer + pointers' pool (id 0 : 3). It will be taken into consideration only when pool + type is 'short'. For 'long' ones it would be overridden by port's MTU. + If not set a driver will choose a default value. + +In order to see how to hook the BM to a given ethernet port, please +refer to Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt. + +Example: + +- main node: + +bm: bm@c8000 { + compatible = "marvell,armada-380-neta-bm"; + reg = <0xc8000 0xac>; + clocks = <&gateclk 13>; + internal-mem = <&bm_bppi>; + status = "okay"; + pool2,capacity = <4096>; + pool1,pkt-size = <512>; +}; + +- internal SRAM node: + +bm_bppi: bm-bppi { + compatible = "mmio-sram"; + reg = ; + ranges = <0 MBUS_ID(0x0c, 0x04) 0 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + clocks = <&gateclk 13>; + status = "okay"; +}; diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index a1c862b4664d..ac6605c62f46 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -40,6 +40,19 @@ config MVMDIO This driver is used by the MV643XX_ETH and MVNETA drivers. +config MVNETA_BM + tristate "Marvell Armada 38x/XP network interface BM support" + depends on MVNETA + ---help--- + This driver supports auxiliary block of the network + interface units in the Marvell ARMADA XP and ARMADA 38x SoC + family, which is called buffer manager. + + This driver, when enabled, strictly cooperates with mvneta + driver and is common for all network ports of the devices, + even for Armada 370 SoC, which doesn't support hardware + buffer management. 
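In practice the cooperation described above amounts to the port driver looking up the optional buffer-manager phandle at probe time and quietly falling back to software buffer management when the BM block is absent or failed to probe. Condensed from the mvneta_probe() changes later in this patch (error handling trimmed):

        bm_node = of_parse_phandle(dn, "buffer-manager", 0);
        if (bm_node && bm_node->data) {
                /* mvneta_bm_probe() stored its private data in the BM node */
                pp->bm_priv = bm_node->data;
                if (mvneta_bm_port_init(pdev, pp) < 0) {
                        dev_info(&pdev->dev, "use SW buffer management\n");
                        pp->bm_priv = NULL;     /* fall back to software BM */
                }
        }
        /* pp->bm_priv later selects mvneta_rx_hwbm() vs mvneta_rx_swbm() */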
+ config MVNETA tristate "Marvell Armada 370/38x/XP network interface support" depends on PLAT_ORION diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile index f6425bd2884b..ff1bffa74803 100644 --- a/drivers/net/ethernet/marvell/Makefile +++ b/drivers/net/ethernet/marvell/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_MVMDIO) += mvmdio.o obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o +obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o obj-$(CONFIG_MVNETA) += mvneta.o obj-$(CONFIG_MVPP2) += mvpp2.o obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index b0ae69f84493..2847c0c291de 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -30,6 +30,7 @@ #include #include #include +#include "mvneta_bm.h" #include #include #include @@ -37,6 +38,10 @@ /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) +#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 +#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 +#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 +#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) @@ -50,6 +55,9 @@ #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 +#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) +#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 +#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 #define MVNETA_PORT_RX_RESET 0x1cc0 #define MVNETA_PORT_RX_DMA_RESET BIT(0) #define MVNETA_PHY_ADDR 0x2000 @@ -107,6 +115,7 @@ #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) #define MVNETA_ACC_MODE 0x2500 +#define MVNETA_BM_ADDRESS 0x2504 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 @@ -253,7 +262,10 @@ #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 #define MVNETA_TX_CSUM_DEF_SIZE 1600 #define MVNETA_TX_CSUM_MAX_SIZE 9800 -#define MVNETA_ACC_MODE_EXT 1 +#define MVNETA_ACC_MODE_EXT1 1 +#define MVNETA_ACC_MODE_EXT2 2 + +#define MVNETA_MAX_DECODE_WIN 6 /* Timeout constants */ #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 @@ -293,7 +305,8 @@ ((addr >= txq->tso_hdrs_phys) && \ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) -#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) +#define MVNETA_RX_GET_BM_POOL_ID(rxd) \ + (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) struct mvneta_statistic { unsigned short offset; @@ -359,6 +372,7 @@ struct mvneta_pcpu_port { }; struct mvneta_port { + u8 id; struct mvneta_pcpu_port __percpu *ports; struct mvneta_pcpu_stats __percpu *stats; @@ -394,6 +408,11 @@ struct mvneta_port { unsigned int tx_csum_limit; unsigned int use_inband_status:1; + struct mvneta_bm *bm_priv; + struct mvneta_bm_pool *pool_long; + struct mvneta_bm_pool *pool_short; + int bm_win_id; + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; @@ -419,6 +438,8 @@ struct mvneta_port { #define MVNETA_TX_L4_CSUM_NOT BIT(31) #define MVNETA_RXD_ERR_CRC 0x0 +#define MVNETA_RXD_BM_POOL_SHIFT 13 +#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) #define MVNETA_RXD_ERR_SUMMARY BIT(16) #define MVNETA_RXD_ERR_OVERRUN BIT(17) #define MVNETA_RXD_ERR_LEN 
BIT(18) @@ -563,6 +584,9 @@ static int rxq_def; static int rx_copybreak __read_mostly = 256; +/* HW BM need that each port be identify by a unique ID */ +static int global_port_id; + #define MVNETA_DRIVER_NAME "mvneta" #define MVNETA_DRIVER_VERSION "1.0" @@ -829,6 +853,214 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp, mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); } +/* Enable buffer management (BM) */ +static void mvneta_rxq_bm_enable(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val |= MVNETA_RXQ_HW_BUF_ALLOC; + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Notify HW about port's assignment of pool for bigger packets */ +static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; + val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Notify HW about port's assignment of pool for smaller packets */ +static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, + struct mvneta_rx_queue *rxq) +{ + u32 val; + + val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); + val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; + val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); + + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); +} + +/* Set port's receive buffer size for assigned BM pool */ +static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, + int buf_size, + u8 pool_id) +{ + u32 val; + + if (!IS_ALIGNED(buf_size, 8)) { + dev_warn(pp->dev->dev.parent, + "illegal buf_size value %d, round to %d\n", + buf_size, ALIGN(buf_size, 8)); + buf_size = ALIGN(buf_size, 8); + } + + val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); + val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; + mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); +} + +/* Configure MBUS window in order to enable access BM internal SRAM */ +static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, + u8 target, u8 attr) +{ + u32 win_enable, win_protect; + int i; + + win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); + + if (pp->bm_win_id < 0) { + /* Find first not occupied window */ + for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { + if (win_enable & (1 << i)) { + pp->bm_win_id = i; + break; + } + } + if (i == MVNETA_MAX_DECODE_WIN) + return -ENOMEM; + } else { + i = pp->bm_win_id; + } + + mvreg_write(pp, MVNETA_WIN_BASE(i), 0); + mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); + + if (i < 4) + mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); + + mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | + (attr << 8) | target); + + mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); + + win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); + win_protect |= 3 << (2 * i); + mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); + + win_enable &= ~(1 << i); + mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); + + return 0; +} + +/* Assign and initialize pools for port. In case of fail + * buffer manager will remain disabled for current port. 
+ */ +static int mvneta_bm_port_init(struct platform_device *pdev, + struct mvneta_port *pp) +{ + struct device_node *dn = pdev->dev.of_node; + u32 long_pool_id, short_pool_id, wsize; + u8 target, attr; + int err; + + /* Get BM window information */ + err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, + &target, &attr); + if (err < 0) + return err; + + pp->bm_win_id = -1; + + /* Open NETA -> BM window */ + err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, + target, attr); + if (err < 0) { + netdev_info(pp->dev, "fail to configure mbus window to BM\n"); + return err; + } + + if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { + netdev_info(pp->dev, "missing long pool id\n"); + return -EINVAL; + } + + /* Create port's long pool depending on mtu */ + pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, + MVNETA_BM_LONG, pp->id, + MVNETA_RX_PKT_SIZE(pp->dev->mtu)); + if (!pp->pool_long) { + netdev_info(pp->dev, "fail to obtain long pool for port\n"); + return -ENOMEM; + } + + pp->pool_long->port_map |= 1 << pp->id; + + mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, + pp->pool_long->id); + + /* If short pool id is not defined, assume using single pool */ + if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) + short_pool_id = long_pool_id; + + /* Create port's short pool */ + pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, + MVNETA_BM_SHORT, pp->id, + MVNETA_BM_SHORT_PKT_SIZE); + if (!pp->pool_short) { + netdev_info(pp->dev, "fail to obtain short pool for port\n"); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + return -ENOMEM; + } + + if (short_pool_id != long_pool_id) { + pp->pool_short->port_map |= 1 << pp->id; + mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, + pp->pool_short->id); + } + + return 0; +} + +/* Update settings of a pool for bigger packets */ +static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) +{ + struct mvneta_bm_pool *bm_pool = pp->pool_long; + int num; + + /* Release all buffers from long pool */ + mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); + if (bm_pool->buf_num) { + WARN(1, "cannot free all buffers in pool %d\n", + bm_pool->id); + goto bm_mtu_err; + } + + bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); + bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); + bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); + + /* Fill entire long pool */ + num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size); + if (num != bm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", + bm_pool->id, num, bm_pool->size); + goto bm_mtu_err; + } + mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); + + return; + +bm_mtu_err: + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); + + pp->bm_priv = NULL; + mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); + netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); +} + /* Start the Ethernet port RX and TX activity */ static void mvneta_port_up(struct mvneta_port *pp) { @@ -1149,9 +1381,17 @@ static void mvneta_defaults_set(struct mvneta_port *pp) mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); /* Set Port Acceleration Mode */ - val = MVNETA_ACC_MODE_EXT; + if (pp->bm_priv) + /* HW buffer management + legacy parser */ + val = MVNETA_ACC_MODE_EXT2; + else + /* SW buffer management + legacy parser */ + val = 
MVNETA_ACC_MODE_EXT1; mvreg_write(pp, MVNETA_ACC_MODE, val); + if (pp->bm_priv) + mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); + /* Update val of portCfg register accordingly with all RxQueue types */ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); mvreg_write(pp, MVNETA_PORT_CONFIG, val); @@ -1518,23 +1758,25 @@ static void mvneta_txq_done(struct mvneta_port *pp, } } -static void *mvneta_frag_alloc(const struct mvneta_port *pp) +void *mvneta_frag_alloc(unsigned int frag_size) { - if (likely(pp->frag_size <= PAGE_SIZE)) - return netdev_alloc_frag(pp->frag_size); + if (likely(frag_size <= PAGE_SIZE)) + return netdev_alloc_frag(frag_size); else - return kmalloc(pp->frag_size, GFP_ATOMIC); + return kmalloc(frag_size, GFP_ATOMIC); } +EXPORT_SYMBOL_GPL(mvneta_frag_alloc); -static void mvneta_frag_free(const struct mvneta_port *pp, void *data) +void mvneta_frag_free(unsigned int frag_size, void *data) { - if (likely(pp->frag_size <= PAGE_SIZE)) + if (likely(frag_size <= PAGE_SIZE)) skb_free_frag(data); else kfree(data); } +EXPORT_SYMBOL_GPL(mvneta_frag_free); -/* Refill processing */ +/* Refill processing for SW buffer management */ static int mvneta_rx_refill(struct mvneta_port *pp, struct mvneta_rx_desc *rx_desc) @@ -1542,7 +1784,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp, dma_addr_t phys_addr; void *data; - data = mvneta_frag_alloc(pp); + data = mvneta_frag_alloc(pp->frag_size); if (!data) return -ENOMEM; @@ -1550,7 +1792,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { - mvneta_frag_free(pp, data); + mvneta_frag_free(pp->frag_size, data); return -ENOMEM; } @@ -1596,22 +1838,156 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, int rx_done, i; rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + if (rx_done) + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + + if (pp->bm_priv) { + for (i = 0; i < rx_done; i++) { + struct mvneta_rx_desc *rx_desc = + mvneta_rxq_next_desc_get(rxq); + u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); + struct mvneta_bm_pool *bm_pool; + + bm_pool = &pp->bm_priv->bm_pools[pool_id]; + /* Return dropped buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + } + return; + } + for (i = 0; i < rxq->size; i++) { struct mvneta_rx_desc *rx_desc = rxq->descs + i; void *data = (void *)rx_desc->buf_cookie; dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); - mvneta_frag_free(pp, data); + mvneta_frag_free(pp->frag_size, data); } +} - if (rx_done) - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); +/* Main rx processing when using software buffer management */ +static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, + struct mvneta_rx_queue *rxq) +{ + struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); + struct net_device *dev = pp->dev; + int rx_done; + u32 rcvd_pkts = 0; + u32 rcvd_bytes = 0; + + /* Get number of received packets */ + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); + + if (rx_todo > rx_done) + rx_todo = rx_done; + + rx_done = 0; + + /* Fairness NAPI loop */ + while (rx_done < rx_todo) { + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + struct sk_buff *skb; + unsigned char *data; + dma_addr_t phys_addr; + u32 rx_status, frag_size; + int rx_bytes, err; + + rx_done++; + rx_status = rx_desc->status; + rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + 
MVNETA_MH_SIZE); + data = (unsigned char *)rx_desc->buf_cookie; + phys_addr = rx_desc->buf_phys_addr; + + if (!mvneta_rxq_desc_is_first_last(rx_status) || + (rx_status & MVNETA_RXD_ERR_SUMMARY)) { +err_drop_frame: + dev->stats.rx_errors++; + mvneta_rx_error(pp, rx_desc); + /* leave the descriptor untouched */ + continue; + } + + if (rx_bytes <= rx_copybreak) { + /* better copy a small frame and not unmap the DMA region */ + skb = netdev_alloc_skb_ip_align(dev, rx_bytes); + if (unlikely(!skb)) + goto err_drop_frame; + + dma_sync_single_range_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes, + DMA_FROM_DEVICE); + memcpy(skb_put(skb, rx_bytes), + data + MVNETA_MH_SIZE + NET_SKB_PAD, + rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + mvneta_rx_csum(pp, rx_status, skb); + napi_gro_receive(&port->napi, skb); + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* leave the descriptor and buffer untouched */ + continue; + } + + /* Refill processing */ + err = mvneta_rx_refill(pp, rx_desc); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + goto err_drop_frame; + } + + frag_size = pp->frag_size; + + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); + + /* After refill old buffer has to be unmapped regardless + * the skb is successfully built or not. + */ + dma_unmap_single(dev->dev.parent, phys_addr, + MVNETA_RX_BUF_SIZE(pp->pkt_size), + DMA_FROM_DEVICE); + + if (!skb) + goto err_drop_frame; + + rcvd_pkts++; + rcvd_bytes += rx_bytes; + + /* Linux processing */ + skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); + skb_put(skb, rx_bytes); + + skb->protocol = eth_type_trans(skb, dev); + + mvneta_rx_csum(pp, rx_status, skb); + + napi_gro_receive(&port->napi, skb); + } + + if (rcvd_pkts) { + struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); + + u64_stats_update_begin(&stats->syncp); + stats->rx_packets += rcvd_pkts; + stats->rx_bytes += rcvd_bytes; + u64_stats_update_end(&stats->syncp); + } + + /* Update rxq management counters */ + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); + + return rx_done; } -/* Main rx processing */ -static int mvneta_rx(struct mvneta_port *pp, int rx_todo, - struct mvneta_rx_queue *rxq) +/* Main rx processing when using hardware buffer management */ +static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo, + struct mvneta_rx_queue *rxq) { struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); struct net_device *dev = pp->dev; @@ -1630,21 +2006,29 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, /* Fairness NAPI loop */ while (rx_done < rx_todo) { struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); + struct mvneta_bm_pool *bm_pool = NULL; struct sk_buff *skb; unsigned char *data; dma_addr_t phys_addr; - u32 rx_status; + u32 rx_status, frag_size; int rx_bytes, err; + u8 pool_id; rx_done++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (unsigned char *)rx_desc->buf_cookie; phys_addr = rx_desc->buf_phys_addr; + pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); + bm_pool = &pp->bm_priv->bm_pools[pool_id]; if (!mvneta_rxq_desc_is_first_last(rx_status) || (rx_status & MVNETA_RXD_ERR_SUMMARY)) { - err_drop_frame: +err_drop_frame_ret_pool: + /* Return the buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); +err_drop_frame: dev->stats.rx_errors++; mvneta_rx_error(pp, rx_desc); /* leave the descriptor untouched */ @@ -1655,7 +2039,7 @@ static int 
mvneta_rx(struct mvneta_port *pp, int rx_todo, /* better copy a small frame and not unmap the DMA region */ skb = netdev_alloc_skb_ip_align(dev, rx_bytes); if (unlikely(!skb)) - goto err_drop_frame; + goto err_drop_frame_ret_pool; dma_sync_single_range_for_cpu(dev->dev.parent, rx_desc->buf_phys_addr, @@ -1673,26 +2057,31 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, rcvd_pkts++; rcvd_bytes += rx_bytes; + /* Return the buffer to the pool */ + mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, + rx_desc->buf_phys_addr); + /* leave the descriptor and buffer untouched */ continue; } /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc); + err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); rxq->missed++; - goto err_drop_frame; + goto err_drop_frame_ret_pool; } - skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); + frag_size = bm_pool->frag_size; + + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); /* After refill old buffer has to be unmapped regardless * the skb is successfully built or not. */ - dma_unmap_single(dev->dev.parent, phys_addr, - MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); - + dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); if (!skb) goto err_drop_frame; @@ -2297,7 +2686,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget) if (rx_queue) { rx_queue = rx_queue - 1; - rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); + if (pp->bm_priv) + rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]); + else + rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); } budget -= rx_done; @@ -2386,9 +2778,17 @@ static int mvneta_rxq_init(struct mvneta_port *pp, mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); - /* Fill RXQ with buffers from RX pool */ - mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); - mvneta_rxq_bm_disable(pp, rxq); + if (!pp->bm_priv) { + /* Fill RXQ with buffers from RX pool */ + mvneta_rxq_buf_size_set(pp, rxq, + MVNETA_RX_BUF_SIZE(pp->pkt_size)); + mvneta_rxq_bm_disable(pp, rxq); + } else { + mvneta_rxq_bm_enable(pp, rxq); + mvneta_rxq_long_pool_set(pp, rxq); + mvneta_rxq_short_pool_set(pp, rxq); + } + mvneta_rxq_fill(pp, rxq, rxq->size); return 0; @@ -2661,6 +3061,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) dev->mtu = mtu; if (!netif_running(dev)) { + if (pp->bm_priv) + mvneta_bm_update_mtu(pp, mtu); + netdev_update_features(dev); return 0; } @@ -2673,6 +3076,9 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mvneta_cleanup_txqs(pp); mvneta_cleanup_rxqs(pp); + if (pp->bm_priv) + mvneta_bm_update_mtu(pp, mtu); + pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -3557,6 +3963,7 @@ static int mvneta_probe(struct platform_device *pdev) struct resource *res; struct device_node *dn = pdev->dev.of_node; struct device_node *phy_node; + struct device_node *bm_node; struct mvneta_port *pp; struct net_device *dev; const char *dt_mac_addr; @@ -3690,26 +4097,39 @@ static int mvneta_probe(struct platform_device *pdev) pp->tx_csum_limit = tx_csum_limit; + dram_target_info = mv_mbus_dram_info(); + if (dram_target_info) + mvneta_conf_mbus_windows(pp, dram_target_info); + pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; pp->dev = dev; SET_NETDEV_DEV(dev, 
&pdev->dev); + pp->id = global_port_id++; + + /* Obtain access to BM resources if enabled and already initialized */ + bm_node = of_parse_phandle(dn, "buffer-manager", 0); + if (bm_node && bm_node->data) { + pp->bm_priv = bm_node->data; + err = mvneta_bm_port_init(pdev, pp); + if (err < 0) { + dev_info(&pdev->dev, "use SW buffer management\n"); + pp->bm_priv = NULL; + } + } + err = mvneta_init(&pdev->dev, pp); if (err < 0) - goto err_free_stats; + goto err_netdev; err = mvneta_port_power_up(pp, phy_mode); if (err < 0) { dev_err(&pdev->dev, "can't power up port\n"); - goto err_free_stats; + goto err_netdev; } - dram_target_info = mv_mbus_dram_info(); - if (dram_target_info) - mvneta_conf_mbus_windows(pp, dram_target_info); - for_each_present_cpu(cpu) { struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); @@ -3744,6 +4164,13 @@ static int mvneta_probe(struct platform_device *pdev) return 0; +err_netdev: + unregister_netdev(dev); + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + } err_free_stats: free_percpu(pp->stats); err_free_ports: @@ -3775,6 +4202,12 @@ static int mvneta_remove(struct platform_device *pdev) of_node_put(pp->phy_node); free_netdev(dev); + if (pp->bm_priv) { + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, + 1 << pp->id); + } + return 0; } diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c new file mode 100644 index 000000000000..8c968e7d2d8f --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -0,0 +1,546 @@ +/* + * Driver for Marvell NETA network controller Buffer Manager. + * + * Copyright (C) 2015 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mvneta_bm.h" + +#define MVNETA_BM_DRIVER_NAME "mvneta_bm" +#define MVNETA_BM_DRIVER_VERSION "1.0" + +static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data) +{ + writel(data, priv->reg_base + offset); +} + +static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset) +{ + return readl(priv->reg_base + offset); +} + +static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); + val |= MVNETA_BM_POOL_ENABLE_MASK; + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); + + /* Clear BM cause register */ + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); +} + +static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); + val &= ~MVNETA_BM_POOL_ENABLE_MASK; + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); +} + +static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + val |= mask; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + val &= ~mask; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, + u8 target_id, u8 attr) +{ + u32 val; + + val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id)); + val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id); + val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id); + val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id); + val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr); + + mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); +} + +/* Allocate skb for BM pool */ +void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + dma_addr_t *buf_phys_addr) +{ + void *buf; + dma_addr_t phys_addr; + + buf = mvneta_frag_alloc(bm_pool->frag_size); + if (!buf) + return NULL; + + /* In order to update buf_cookie field of RX descriptor properly, + * BM hardware expects buf virtual address to be placed in the + * first four bytes of mapped buffer. 
+ */ + *(u32 *)buf = (u32)buf; + phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) { + mvneta_frag_free(bm_pool->frag_size, buf); + return NULL; + } + *buf_phys_addr = phys_addr; + + return buf; +} + +/* Refill processing for HW buffer management */ +int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + dma_addr_t buf_phys_addr; + void *buf; + + buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr); + if (!buf) + return -ENOMEM; + + mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr); + + return 0; +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill); + +/* Allocate buffers for the pool */ +int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + int buf_num) +{ + int err, i; + + if (bm_pool->buf_num == bm_pool->size) { + dev_dbg(&priv->pdev->dev, "pool %d already filled\n", + bm_pool->id); + return bm_pool->buf_num; + } + + if (buf_num < 0 || + (buf_num + bm_pool->buf_num > bm_pool->size)) { + dev_err(&priv->pdev->dev, + "cannot allocate %d buffers for pool %d\n", + buf_num, bm_pool->id); + return 0; + } + + for (i = 0; i < buf_num; i++) { + err = mvneta_bm_pool_refill(priv, bm_pool); + if (err < 0) + break; + } + + /* Update BM driver with number of buffers added to pool */ + bm_pool->buf_num += i; + + dev_dbg(&priv->pdev->dev, + "%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n", + bm_pool->type == MVNETA_BM_SHORT ? "short" : "long", + bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size, + bm_pool->frag_size); + + dev_dbg(&priv->pdev->dev, + "%s pool %d: %d of %d buffers added\n", + bm_pool->type == MVNETA_BM_SHORT ? "short" : "long", + bm_pool->id, i, buf_num); + + return i; +} +EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add); + +/* Create pool */ +static int mvneta_bm_pool_create(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + struct platform_device *pdev = priv->pdev; + u8 target_id, attr; + int size_bytes, err; + + size_bytes = sizeof(u32) * bm_pool->size; + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, + &bm_pool->phys_addr, + GFP_KERNEL); + if (!bm_pool->virt_addr) + return -ENOMEM; + + if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", + bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN); + return -ENOMEM; + } + + err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id, + &attr); + if (err < 0) { + dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, + bm_pool->phys_addr); + return err; + } + + /* Set pool address */ + mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id), + bm_pool->phys_addr); + + mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr); + mvneta_bm_pool_enable(priv, bm_pool->id); + + return 0; +} + +/* Notify the driver that BM pool is being used as specific type and return the + * pool pointer on success + */ +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size) +{ + struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id]; + int num, err; + + if (new_pool->type == MVNETA_BM_LONG && + new_pool->port_map != 1 << port_id) { + dev_err(&priv->pdev->dev, + "long pool cannot be shared by the ports\n"); + return NULL; + } + + if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) { + dev_err(&priv->pdev->dev, + 
"mixing pools' types between the ports is forbidden\n"); + return NULL; + } + + if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT) + new_pool->pkt_size = pkt_size; + + /* Allocate buffers in case BM pool hasn't been used yet */ + if (new_pool->type == MVNETA_BM_FREE) { + new_pool->type = type; + new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); + new_pool->frag_size = + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + /* Create new pool */ + err = mvneta_bm_pool_create(priv, new_pool); + if (err) { + dev_err(&priv->pdev->dev, "fail to create pool %d\n", + new_pool->id); + return NULL; + } + + /* Allocate buffers for this pool */ + num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size); + if (num != new_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", + new_pool->id, num, new_pool->size); + return NULL; + } + } + + return new_pool; +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_use); + +/* Free all buffers from the pool */ +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map) +{ + int i; + + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; + + mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + for (i = 0; i < bm_pool->buf_num; i++) { + dma_addr_t buf_phys_addr; + u32 *vaddr; + + /* Get buffer physical address (indirect access) */ + buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool); + + /* Work-around to the problems when destroying the pool, + * when it occurs that a read access to BPPI returns 0. + */ + if (buf_phys_addr == 0) + continue; + + vaddr = phys_to_virt(buf_phys_addr); + if (!vaddr) + break; + + dma_unmap_single(&priv->pdev->dev, buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); + mvneta_frag_free(bm_pool->frag_size, vaddr); + } + + mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + /* Update BM driver with number of buffers removed from pool */ + bm_pool->buf_num -= i; +} +EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); + +/* Cleanup pool */ +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map) +{ + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; + + bm_pool->type = MVNETA_BM_FREE; + + mvneta_bm_bufs_free(priv, bm_pool, port_map); + if (bm_pool->buf_num) + WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); + + if (bm_pool->virt_addr) { + dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size, + bm_pool->virt_addr, bm_pool->phys_addr); + bm_pool->virt_addr = NULL; + } + + mvneta_bm_pool_disable(priv, bm_pool->id); +} +EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy); + +static void mvneta_bm_pools_init(struct mvneta_bm *priv) +{ + struct device_node *dn = priv->pdev->dev.of_node; + struct mvneta_bm_pool *bm_pool; + char prop[15]; + u32 size; + int i; + + /* Activate BM unit */ + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK); + + /* Create all pools with maximum size */ + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { + bm_pool = &priv->bm_pools[i]; + bm_pool->id = i; + bm_pool->type = MVNETA_BM_FREE; + + /* Reset read pointer */ + mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0); + + /* Reset write pointer */ + mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0); + + /* Configure pool size according to DT or use default value */ + sprintf(prop, "pool%d,capacity", i); + if (of_property_read_u32(dn, prop, &size)) { + size = MVNETA_BM_POOL_CAP_DEF; + } else if (size > MVNETA_BM_POOL_CAP_MAX) { + 
dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, set to %d\n", + i, size, MVNETA_BM_POOL_CAP_MAX); + size = MVNETA_BM_POOL_CAP_MAX; + } else if (size < MVNETA_BM_POOL_CAP_MIN) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, set to %d\n", + i, size, MVNETA_BM_POOL_CAP_MIN); + size = MVNETA_BM_POOL_CAP_MIN; + } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) { + dev_warn(&priv->pdev->dev, + "Illegal pool %d capacity %d, round to %d\n", + i, size, ALIGN(size, + MVNETA_BM_POOL_CAP_ALIGN)); + size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); + } + bm_pool->size = size; + + mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), + bm_pool->size); + + /* Obtain custom pkt_size from DT */ + sprintf(prop, "pool%d,pkt-size", i); + if (of_property_read_u32(dn, prop, &bm_pool->pkt_size)) + bm_pool->pkt_size = 0; + } +} + +static void mvneta_bm_default_set(struct mvneta_bm *priv) +{ + u32 val; + + /* Mask BM all interrupts */ + mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0); + + /* Clear BM cause register */ + mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); + + /* Set BM configuration register */ + val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); + + /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */ + val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK; + val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP; + mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); +} + +static int mvneta_bm_init(struct mvneta_bm *priv) +{ + mvneta_bm_default_set(priv); + + /* Allocate and initialize BM pools structures */ + priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM, + sizeof(struct mvneta_bm_pool), + GFP_KERNEL); + if (!priv->bm_pools) + return -ENOMEM; + + mvneta_bm_pools_init(priv); + + return 0; +} + +static int mvneta_bm_get_sram(struct device_node *dn, + struct mvneta_bm *priv) +{ + priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0); + if (!priv->bppi_pool) + return -ENOMEM; + + priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool, + MVNETA_BM_BPPI_SIZE, + &priv->bppi_phys_addr); + if (!priv->bppi_virt_addr) + return -ENOMEM; + + return 0; +} + +static void mvneta_bm_put_sram(struct mvneta_bm *priv) +{ + gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr, + MVNETA_BM_BPPI_SIZE); +} + +static int mvneta_bm_probe(struct platform_device *pdev) +{ + struct device_node *dn = pdev->dev.of_node; + struct mvneta_bm *priv; + struct resource *res; + int err; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->reg_base)) + return PTR_ERR(priv->reg_base); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + err = clk_prepare_enable(priv->clk); + if (err < 0) + return err; + + err = mvneta_bm_get_sram(dn, priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to allocate internal memory\n"); + goto err_clk; + } + + priv->pdev = pdev; + + /* Initialize buffer manager internals */ + err = mvneta_bm_init(priv); + if (err < 0) { + dev_err(&pdev->dev, "failed to initialize controller\n"); + goto err_sram; + } + + dn->data = priv; + platform_set_drvdata(pdev, priv); + + dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n"); + + return 0; + +err_sram: + mvneta_bm_put_sram(priv); +err_clk: + clk_disable_unprepare(priv->clk); + return err; +} + +static int mvneta_bm_remove(struct platform_device *pdev) +{ + struct mvneta_bm *priv = 
platform_get_drvdata(pdev); + u8 all_ports_map = 0xff; + int i = 0; + + for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { + struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i]; + + mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map); + } + + mvneta_bm_put_sram(priv); + + /* Dectivate BM unit */ + mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); + + clk_disable_unprepare(priv->clk); + + return 0; +} + +static const struct of_device_id mvneta_bm_match[] = { + { .compatible = "marvell,armada-380-neta-bm" }, + { } +}; +MODULE_DEVICE_TABLE(of, mvneta_bm_match); + +static struct platform_driver mvneta_bm_driver = { + .probe = mvneta_bm_probe, + .remove = mvneta_bm_remove, + .driver = { + .name = MVNETA_BM_DRIVER_NAME, + .of_match_table = mvneta_bm_match, + }, +}; + +module_platform_driver(mvneta_bm_driver); + +MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com"); +MODULE_AUTHOR("Marcin Wojtas "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h new file mode 100644 index 000000000000..db239e061ab0 --- /dev/null +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -0,0 +1,189 @@ +/* + * Driver for Marvell NETA network controller Buffer Manager. + * + * Copyright (C) 2015 Marvell + * + * Marcin Wojtas + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef _MVNETA_BM_H_ +#define _MVNETA_BM_H_ + +/* BM Configuration Register */ +#define MVNETA_BM_CONFIG_REG 0x0 +#define MVNETA_BM_STATUS_MASK 0x30 +#define MVNETA_BM_ACTIVE_MASK BIT(4) +#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000 +#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18) +#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19) + +/* BM Activation Register */ +#define MVNETA_BM_COMMAND_REG 0x4 +#define MVNETA_BM_START_MASK BIT(0) +#define MVNETA_BM_STOP_MASK BIT(1) +#define MVNETA_BM_PAUSE_MASK BIT(2) + +/* BM Xbar interface Register */ +#define MVNETA_BM_XBAR_01_REG 0x8 +#define MVNETA_BM_XBAR_23_REG 0xc +#define MVNETA_BM_XBAR_POOL_REG(pool) \ + (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG) +#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0) +#define MVNETA_BM_TARGET_ID_MASK(pool) \ + (0xf << MVNETA_BM_TARGET_ID_OFFS(pool)) +#define MVNETA_BM_TARGET_ID_VAL(pool, id) \ + ((id) << MVNETA_BM_TARGET_ID_OFFS(pool)) +#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 
20 : 4) +#define MVNETA_BM_XBAR_ATTR_MASK(pool) \ + (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool)) +#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \ + ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool)) + +/* Address of External Buffer Pointers Pool Register */ +#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4)) +#define MVNETA_BM_POOL_ENABLE_MASK BIT(0) + +/* External Buffer Pointers Pool RD pointer Register */ +#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4)) +#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc +#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16 +#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000 + +/* External Buffer Pointers Pool WR pointer */ +#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4)) +#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0 +#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc +#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16 +#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000 + +/* External Buffer Pointers Pool Size Register */ +#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4)) +#define MVNETA_BM_POOL_SIZE_MASK 0x3fff + +/* BM Interrupt Cause Register */ +#define MVNETA_BM_INTR_CAUSE_REG (0x50) + +/* BM interrupt Mask Register */ +#define MVNETA_BM_INTR_MASK_REG (0x54) + +/* Other definitions */ +#define MVNETA_BM_SHORT_PKT_SIZE 256 +#define MVNETA_BM_POOLS_NUM 4 +#define MVNETA_BM_POOL_CAP_MIN 128 +#define MVNETA_BM_POOL_CAP_DEF 2048 +#define MVNETA_BM_POOL_CAP_MAX \ + (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN) +#define MVNETA_BM_POOL_CAP_ALIGN 32 +#define MVNETA_BM_POOL_PTR_ALIGN 32 + +#define MVNETA_BM_POOL_ACCESS_OFFS 8 + +#define MVNETA_BM_BPPI_SIZE 0x100000 + +#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) + +enum mvneta_bm_type { + MVNETA_BM_FREE, + MVNETA_BM_LONG, + MVNETA_BM_SHORT +}; + +struct mvneta_bm { + void __iomem *reg_base; + struct clk *clk; + struct platform_device *pdev; + + struct gen_pool *bppi_pool; + /* BPPI virtual base address */ + void __iomem *bppi_virt_addr; + /* BPPI physical base address */ + dma_addr_t bppi_phys_addr; + + /* BM pools */ + struct mvneta_bm_pool *bm_pools; +}; + +struct mvneta_bm_pool { + /* Pool number in the range 0-3 */ + u8 id; + enum mvneta_bm_type type; + + /* Buffer Pointers Pool External (BPPE) size in number of bytes */ + int size; + /* Number of buffers used by this pool */ + int buf_num; + /* Pool buffer size */ + int buf_size; + /* Packet size */ + int pkt_size; + /* Single frag size */ + u32 frag_size; + + /* BPPE virtual base address */ + u32 *virt_addr; + /* BPPE physical base address */ + dma_addr_t phys_addr; + + /* Ports using BM pool */ + u8 port_map; + + struct mvneta_bm *priv; +}; + +/* Declarations and definitions */ +void *mvneta_frag_alloc(unsigned int frag_size); +void mvneta_frag_free(unsigned int frag_size, void *data); + +#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE) +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map); +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map); +int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + int buf_num); +int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool); +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size); + +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + dma_addr_t buf_phys_addr) +{ + 
writel_relaxed(buf_phys_addr, priv->bppi_virt_addr + + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); +} + +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ + return readl_relaxed(priv->bppi_virt_addr + + (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); +} +#else +void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map) {} +void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + int buf_num) { return 0; } +int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) {return 0; } +struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, + enum mvneta_bm_type type, u8 port_id, + int pkt_size) { return NULL; } + +static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, + dma_addr_t buf_phys_addr) {} + +static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) +{ return 0; } +#endif /* CONFIG_MVNETA_BM */ +#endif -- cgit v1.2.3 From baa11ebc0c7680861f74f8a11dca903e4b421262 Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Mon, 14 Mar 2016 09:39:05 +0100 Subject: net: mvneta: Use the new hwbm framework Now that the hardware buffer management framework had been introduced, let's use it. Tested-by: Sebastian Careba Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/Kconfig | 1 + drivers/net/ethernet/marvell/mvneta.c | 18 +++-- drivers/net/ethernet/marvell/mvneta_bm.c | 125 ++++++++----------------------- drivers/net/ethernet/marvell/mvneta_bm.h | 17 ++--- 4 files changed, 49 insertions(+), 112 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index ac6605c62f46..62d80fddbe34 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -43,6 +43,7 @@ config MVMDIO config MVNETA_BM tristate "Marvell Armada 38x/XP network interface BM support" depends on MVNETA + select HWBM ---help--- This driver supports auxiliary block of the network interface units in the Marvell ARMADA XP and ARMADA 38x SoC diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 2847c0c291de..3d8e7d357ec9 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "mvneta_bm.h" #include #include @@ -1026,11 +1027,12 @@ static int mvneta_bm_port_init(struct platform_device *pdev, static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) { struct mvneta_bm_pool *bm_pool = pp->pool_long; + struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; int num; /* Release all buffers from long pool */ mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); - if (bm_pool->buf_num) { + if (hwbm_pool->buf_num) { WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); goto bm_mtu_err; @@ -1038,14 +1040,14 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); - bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); + hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + + 
SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); /* Fill entire long pool */ - num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size); - if (num != bm_pool->size) { + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); + if (num != hwbm_pool->size) { WARN(1, "pool %d: %d of %d allocated\n", - bm_pool->id, num, bm_pool->size); + bm_pool->id, num, hwbm_pool->size); goto bm_mtu_err; } mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); @@ -2066,14 +2068,14 @@ err_drop_frame: } /* Refill processing */ - err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool); + err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); if (err) { netdev_err(dev, "Linux processing - Can't refill\n"); rxq->missed++; goto err_drop_frame_ret_pool; } - frag_size = bm_pool->frag_size; + frag_size = bm_pool->hwbm_pool.frag_size; skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c index 8c968e7d2d8f..01fccec632ec 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.c +++ b/drivers/net/ethernet/marvell/mvneta_bm.c @@ -10,16 +10,17 @@ * warranty of any kind, whether express or implied. */ -#include +#include #include -#include -#include -#include +#include +#include #include #include -#include +#include #include -#include +#include +#include +#include #include "mvneta_bm.h" #define MVNETA_BM_DRIVER_NAME "mvneta_bm" @@ -88,17 +89,13 @@ static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); } -/* Allocate skb for BM pool */ -void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, - dma_addr_t *buf_phys_addr) +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { - void *buf; + struct mvneta_bm_pool *bm_pool = + (struct mvneta_bm_pool *)hwbm_pool->priv; + struct mvneta_bm *priv = bm_pool->priv; dma_addr_t phys_addr; - buf = mvneta_frag_alloc(bm_pool->frag_size); - if (!buf) - return NULL; - /* In order to update buf_cookie field of RX descriptor properly, * BM hardware expects buf virtual address to be placed in the * first four bytes of mapped buffer. 
@@ -106,75 +103,13 @@ void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, *(u32 *)buf = (u32)buf; phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) { - mvneta_frag_free(bm_pool->frag_size, buf); - return NULL; - } - *buf_phys_addr = phys_addr; - - return buf; -} - -/* Refill processing for HW buffer management */ -int mvneta_bm_pool_refill(struct mvneta_bm *priv, - struct mvneta_bm_pool *bm_pool) -{ - dma_addr_t buf_phys_addr; - void *buf; - - buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr); - if (!buf) + if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) return -ENOMEM; - mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr); - + mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr); return 0; } -EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill); - -/* Allocate buffers for the pool */ -int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, - int buf_num) -{ - int err, i; - - if (bm_pool->buf_num == bm_pool->size) { - dev_dbg(&priv->pdev->dev, "pool %d already filled\n", - bm_pool->id); - return bm_pool->buf_num; - } - - if (buf_num < 0 || - (buf_num + bm_pool->buf_num > bm_pool->size)) { - dev_err(&priv->pdev->dev, - "cannot allocate %d buffers for pool %d\n", - buf_num, bm_pool->id); - return 0; - } - - for (i = 0; i < buf_num; i++) { - err = mvneta_bm_pool_refill(priv, bm_pool); - if (err < 0) - break; - } - - /* Update BM driver with number of buffers added to pool */ - bm_pool->buf_num += i; - - dev_dbg(&priv->pdev->dev, - "%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n", - bm_pool->type == MVNETA_BM_SHORT ? "short" : "long", - bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size, - bm_pool->frag_size); - - dev_dbg(&priv->pdev->dev, - "%s pool %d: %d of %d buffers added\n", - bm_pool->type == MVNETA_BM_SHORT ? 
"short" : "long", - bm_pool->id, i, buf_num); - - return i; -} -EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add); +EXPORT_SYMBOL_GPL(mvneta_bm_construct); /* Create pool */ static int mvneta_bm_pool_create(struct mvneta_bm *priv, @@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct mvneta_bm *priv, struct platform_device *pdev = priv->pdev; u8 target_id, attr; int size_bytes, err; - - size_bytes = sizeof(u32) * bm_pool->size; + size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size; bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, &bm_pool->phys_addr, GFP_KERNEL); @@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, /* Allocate buffers in case BM pool hasn't been used yet */ if (new_pool->type == MVNETA_BM_FREE) { + struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool; + + new_pool->priv = priv; new_pool->type = type; new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); - new_pool->frag_size = + hwbm_pool->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + hwbm_pool->construct = mvneta_bm_construct; + hwbm_pool->priv = new_pool; /* Create new pool */ err = mvneta_bm_pool_create(priv, new_pool); @@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, } /* Allocate buffers for this pool */ - num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size); - if (num != new_pool->size) { + num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); + if (num != hwbm_pool->size) { WARN(1, "pool %d: %d of %d allocated\n", - new_pool->id, num, new_pool->size); + new_pool->id, num, hwbm_pool->size); return NULL; } } @@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); - for (i = 0; i < bm_pool->buf_num; i++) { + for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) { dma_addr_t buf_phys_addr; u32 *vaddr; @@ -303,13 +242,13 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, dma_unmap_single(&priv->pdev->dev, buf_phys_addr, bm_pool->buf_size, DMA_FROM_DEVICE); - mvneta_frag_free(bm_pool->frag_size, vaddr); + hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); } mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); /* Update BM driver with number of buffers removed from pool */ - bm_pool->buf_num -= i; + bm_pool->hwbm_pool.buf_num -= i; } EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); @@ -317,6 +256,7 @@ EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); void mvneta_bm_pool_destroy(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map) { + struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; bm_pool->port_map &= ~port_map; if (bm_pool->port_map) return; @@ -324,11 +264,12 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv, bm_pool->type = MVNETA_BM_FREE; mvneta_bm_bufs_free(priv, bm_pool, port_map); - if (bm_pool->buf_num) + if (hwbm_pool->buf_num) WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); if (bm_pool->virt_addr) { - dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size, + dma_free_coherent(&priv->pdev->dev, + sizeof(u32) * hwbm_pool->size, bm_pool->virt_addr, bm_pool->phys_addr); bm_pool->virt_addr = NULL; } @@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct mvneta_bm *priv) MVNETA_BM_POOL_CAP_ALIGN)); size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); } - bm_pool->size = size; + bm_pool->hwbm_pool.size = size; mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), - bm_pool->size); + 
bm_pool->hwbm_pool.size); /* Obtain custom pkt_size from DT */ sprintf(prop, "pool%d,pkt-size", i); diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h index db239e061ab0..e74fd44a92f7 100644 --- a/drivers/net/ethernet/marvell/mvneta_bm.h +++ b/drivers/net/ethernet/marvell/mvneta_bm.h @@ -108,20 +108,15 @@ struct mvneta_bm { }; struct mvneta_bm_pool { + struct hwbm_pool hwbm_pool; /* Pool number in the range 0-3 */ u8 id; enum mvneta_bm_type type; - /* Buffer Pointers Pool External (BPPE) size in number of bytes */ - int size; - /* Number of buffers used by this pool */ - int buf_num; - /* Pool buffer size */ - int buf_size; /* Packet size */ int pkt_size; - /* Single frag size */ - u32 frag_size; + /* Size of the buffer acces through DMA*/ + u32 buf_size; /* BPPE virtual base address */ u32 *virt_addr; @@ -143,8 +138,7 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map); void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map); -int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, - int buf_num); +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf); int mvneta_bm_pool_refill(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool); struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, @@ -170,8 +164,7 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map) {} void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, u8 port_map) {} -int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, - int buf_num) { return 0; } +int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; } int mvneta_bm_pool_refill(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool) {return 0; } struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, -- cgit v1.2.3 From 5c2e26f6f674ee93719769d024cc72fad3ba9134 Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Mon, 14 Mar 2016 16:36:14 +0530 Subject: net: thunderx: Set recevie buffer page usage count in bulk Instead of calling get_page() for every receive buffer carved out of page, set page's usage count at the end, to reduce no of atomic calls. Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 1 + drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 31 ++++++++++++++++------ 2 files changed, 24 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 092f097a5943..872b22d831ee 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -294,6 +294,7 @@ struct nicvf { u32 speed; struct page *rb_page; u32 rb_page_offset; + u16 rb_pageref; bool rb_alloc_fail; bool rb_work_scheduled; struct delayed_work rbdr_work; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 0dd1abf86079..fa05e347262f 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -18,6 +18,15 @@ #include "q_struct.h" #include "nicvf_queues.h" +static void nicvf_get_page(struct nicvf *nic) +{ + if (!nic->rb_pageref || !nic->rb_page) + return; + + atomic_add(nic->rb_pageref, &nic->rb_page->_count); + nic->rb_pageref = 0; +} + /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, u64 reg, int bit_pos, int bits, int val) @@ -81,16 +90,15 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0; /* Check if request can be accomodated in previous allocated page */ - if (nic->rb_page) { - if ((nic->rb_page_offset + buf_len + buf_len) > - (PAGE_SIZE << order)) { - nic->rb_page = NULL; - } else { - nic->rb_page_offset += buf_len; - get_page(nic->rb_page); - } + if (nic->rb_page && + ((nic->rb_page_offset + buf_len) < (PAGE_SIZE << order))) { + nic->rb_pageref++; + goto ret; } + nicvf_get_page(nic); + nic->rb_page = NULL; + /* Allocate a new page */ if (!nic->rb_page) { nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, @@ -102,7 +110,9 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, nic->rb_page_offset = 0; } +ret: *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset); + nic->rb_page_offset += buf_len; return 0; } @@ -158,6 +168,9 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, desc = GET_RBDR_DESC(rbdr, idx); desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN; } + + nicvf_get_page(nic); + return 0; } @@ -241,6 +254,8 @@ refill: new_rb++; } + nicvf_get_page(nic); + /* make sure all memory stores are done before ringing doorbell */ smp_wmb(); -- cgit v1.2.3 From 1d368790bc2d4111feae6fcc0b3b68d6ae92ff0f Mon Sep 17 00:00:00 2001 From: Sunil Goutham Date: Mon, 14 Mar 2016 16:36:15 +0530 Subject: net: thunderx: Adjust nicvf structure to reduce cache misses Adjusted nicvf structure such that all elements used in hot path like napi, xmit e.t.c fall into same cache line. This reduced no of cache misses and resulted in ~2% increase in no of packets handled on a core. Also modified elements with :1 notation to boolean, to be consistent with other element definitions. Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/nic.h | 52 ++++++++++++++++++------------- 1 file changed, 30 insertions(+), 22 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 872b22d831ee..83025bb4737c 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h @@ -272,46 +272,54 @@ struct nicvf { struct nicvf *pnicvf; struct net_device *netdev; struct pci_dev *pdev; + void __iomem *reg_base; + struct queue_set *qs; + struct nicvf_cq_poll *napi[8]; u8 vf_id; - u8 node; - u8 tns_mode:1; - u8 sqs_mode:1; - u8 loopback_supported:1; + u8 sqs_id; + bool sqs_mode; bool hw_tso; - u16 mtu; - struct queue_set *qs; + + /* Receive buffer alloc */ + u32 rb_page_offset; + u16 rb_pageref; + bool rb_alloc_fail; + bool rb_work_scheduled; + struct page *rb_page; + struct delayed_work rbdr_work; + struct tasklet_struct rbdr_task; + + /* Secondary Qset */ + u8 sqs_count; #define MAX_SQS_PER_VF_SINGLE_NODE 5 #define MAX_SQS_PER_VF 11 - u8 sqs_id; - u8 sqs_count; /* Secondary Qset count */ struct nicvf *snicvf[MAX_SQS_PER_VF]; + + /* Queue count */ u8 rx_queues; u8 tx_queues; u8 max_queues; - void __iomem *reg_base; + + u8 node; + u8 cpi_alg; + u16 mtu; bool link_up; u8 duplex; u32 speed; - struct page *rb_page; - u32 rb_page_offset; - u16 rb_pageref; - bool rb_alloc_fail; - bool rb_work_scheduled; - struct delayed_work rbdr_work; - struct tasklet_struct rbdr_task; - struct tasklet_struct qs_err_task; - struct tasklet_struct cq_task; - struct nicvf_cq_poll *napi[8]; + bool tns_mode; + bool loopback_supported; struct nicvf_rss_info rss_info; - u8 cpi_alg; + struct tasklet_struct qs_err_task; + struct work_struct reset_task; + /* Interrupt coalescing settings */ u32 cq_coalesce_usecs; - u32 msg_enable; + + /* Stats */ struct nicvf_hw_stats hw_stats; struct nicvf_drv_stats drv_stats; struct bgx_stats bgx_stats; - struct work_struct reset_task; /* MSI-X */ bool msix_enabled; -- cgit v1.2.3 From 6aab1a628b99bc1350673e5191a21571626cade3 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:07:10 +0100 Subject: net: mediatek: use dma_addr_t correctly dma_alloc_coherent() expects a dma_addr_t pointer as its argument, not an 'unsigned int', and gcc correctly warns about broken code in the mtk_init_fq_dma function: drivers/net/ethernet/mediatek/mtk_eth_soc.c: In function 'mtk_init_fq_dma': drivers/net/ethernet/mediatek/mtk_eth_soc.c:463:13: error: passing argument 3 of 'dma_alloc_coherent' from incompatible pointer type [-Werror=incompatible-pointer-types] This changes the type of the local variable to dma_addr_t. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index ba3afa5d4640..3e42204adfe5 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -453,7 +453,7 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd, /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { - unsigned int phy_ring_head, phy_ring_tail; + dma_addr_t phy_ring_head, phy_ring_tail; int cnt = MTK_DMA_SIZE; dma_addr_t dma_addr; int i; -- cgit v1.2.3 From a25cdc0dd87d308b508ebea436cb43ee49d54a0e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:07:11 +0100 Subject: net: mediatek: remove incorrect dma_mask assignment Device drivers should not mess with the DMA mask directly, but instead call dma_set_mask() etc if needed. In case of the mtk_eth_soc driver, the mask already gets set correctly when the device is created, and setting it again is against the documented API. This removes the incorrect setting. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 3e42204adfe5..87f417712da0 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1676,9 +1676,6 @@ static int mtk_probe(struct platform_device *pdev) struct mtk_eth *eth; int err; - pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); - pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - device_reset(&pdev->dev); match = of_match_device(of_mtk_match, &pdev->dev); -- cgit v1.2.3 From 916848ca6f10e1bbf0c2af72ef51e494aaa5c466 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:07:12 +0100 Subject: net: mediatek: check device_reset return code The device_reset() function may fail, so we have to check its return value, e.g. to make deferred probing work correctly. gcc warns about it because of the warn_unused_result attribute: drivers/net/ethernet/mediatek/mtk_eth_soc.c: In function 'mtk_probe': drivers/net/ethernet/mediatek/mtk_eth_soc.c:1679:2: error: ignoring return value of 'device_reset', declared with attribute warn_unused_result [-Werror=unused-result] This adds the trivial error check to propagate the return value to the generic platform device probe code. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 87f417712da0..1e6c5498bba9 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -1676,7 +1676,9 @@ static int mtk_probe(struct platform_device *pdev) struct mtk_eth *eth; int err; - device_reset(&pdev->dev); + err = device_reset(&pdev->dev); + if (err) + return err; match = of_match_device(of_mtk_match, &pdev->dev); soc = (struct mtk_soc_data *)match->data; -- cgit v1.2.3 From baefd7015cdb304ce6c94f9679d0486c71954766 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:18:34 +0100 Subject: mlx4: add missing braces in verify_qp_parameters The implementation of QP paravirtualization back in linux-3.7 included some code that looks very dubious, and gcc-6 has grown smart enough to warn about it: drivers/net/ethernet/mellanox/mlx4/resource_tracker.c: In function 'verify_qp_parameters': drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:3154:5: error: statement is indented as if it were guarded by... [-Werror=misleading-indentation] if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) { ^~ drivers/net/ethernet/mellanox/mlx4/resource_tracker.c:3144:4: note: ...this 'if' clause, but it is not if (slave != mlx4_master_func_num(dev)) >From looking at the context, I'm reasonably sure that the indentation is correct but that it should have contained curly braces from the start, as the update_gid() function in the same patch correctly does. Signed-off-by: Arnd Bergmann Fixes: 54679e148287 ("mlx4: Implement QP paravirtualization and maintain phys_pkey_cache for smp_snoop") Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 25ce1b030a00..cd9b2b28df88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -3141,7 +3141,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, case QP_TRANS_RTS2RTS: case QP_TRANS_SQD2SQD: case QP_TRANS_SQD2RTS: - if (slave != mlx4_master_func_num(dev)) + if (slave != mlx4_master_func_num(dev)) { if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) { port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1; if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB) @@ -3160,6 +3160,7 @@ static int verify_qp_parameters(struct mlx4_dev *dev, if (qp_ctx->alt_path.mgid_index >= num_gids) return -EINVAL; } + } break; default: break; -- cgit v1.2.3 From e725a66c0202b5f36c2f9d59d26a65c53bbf21f7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:18:35 +0100 Subject: farsync: fix off-by-one bug in fst_add_one gcc-6 finds an out of bounds access in the fst_add_one function when calculating the end of the mmio area: drivers/net/wan/farsync.c: In function 'fst_add_one': drivers/net/wan/farsync.c:418:53: error: index 2 denotes an offset greater than size of 'u8[2][8192] {aka unsigned char[2][8192]}' [-Werror=array-bounds] #define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X)) ^ include/linux/compiler-gcc.h:158:21: note: in definition of macro '__compiler_offsetof' __builtin_offsetof(a, b) ^ drivers/net/wan/farsync.c:418:37: note: in expansion of macro 'offsetof' #define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X)) ^~~~~~~~ drivers/net/wan/farsync.c:2519:36: note: in expansion of macro 'BUF_OFFSET' + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]); ^~~~~~~~~~ The warning is correct, but not critical because this appears to be a write-only variable that is set by each WAN driver but never accessed afterwards. I'm taking the minimal fix here, using the correct pointer by pointing 'mem_end' to the last byte inside of the register area as all other WAN drivers do, rather than the first byte outside of it. An alternative would be to just remove the mem_end member entirely. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller --- drivers/net/wan/farsync.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 44541dbc5c28..69b994f3b8c5 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2516,7 +2516,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->mem_start = card->phys_mem + BUF_OFFSET ( txBuffer[i][0][0]); dev->mem_end = card->phys_mem - + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]); + + BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER - 1][LEN_RX_BUFFER - 1]); dev->base_addr = card->pci_conf; dev->irq = card->irq; -- cgit v1.2.3 From 83d6f1f15f8cce844b0a131cbc63e444620e48b5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:18:36 +0100 Subject: ath9k: fix buffer overrun for ar9287 Code that was added back in 2.6.38 has an obvious overflow when accessing a static array, and at the time it was added only a code comment was put in front of it as a reminder to have it reviewed properly. 
This has not happened, but gcc-6 now points to the specific overflow: drivers/net/wireless/ath/ath9k/eeprom.c: In function 'ath9k_hw_get_gain_boundaries_pdadcs': drivers/net/wireless/ath/ath9k/eeprom.c:483:44: error: array subscript is above array bounds [-Werror=array-bounds] maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4]; ~~~~~~~~~~~~~~~~~~~~~~~~~^~~ It turns out that the correct array length exists in the local 'intercepts' variable of this function, so we can just use that instead of hardcoding '4', so this patch changes all three instances to use that variable. The other two instances were already correct, but it's more consistent this way. Signed-off-by: Arnd Bergmann Fixes: 940cd2c12ebf ("ath9k_hw: merge the ar9287 version of ath9k_hw_get_gain_boundaries_pdadcs") Signed-off-by: David S. Miller --- drivers/net/wireless/ath/ath9k/eeprom.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index 73fb4232f9f2..a794157a147d 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -477,10 +477,9 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah, if (match) { if (AR_SREV_9287(ah)) { - /* FIXME: array overrun? */ for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_9287[idxL].pwrPdg[i][0]; - maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4]; + maxPwrT4[i] = data_9287[idxL].pwrPdg[i][intercepts - 1]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_9287[idxL].pwrPdg[i], data_9287[idxL].vpdPdg[i], @@ -490,7 +489,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah, } else if (eeprom_4k) { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_4k[idxL].pwrPdg[i][0]; - maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4]; + maxPwrT4[i] = data_4k[idxL].pwrPdg[i][intercepts - 1]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_4k[idxL].pwrPdg[i], data_4k[idxL].vpdPdg[i], @@ -500,7 +499,7 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah, } else { for (i = 0; i < numXpdGains; i++) { minPwrT4[i] = data_def[idxL].pwrPdg[i][0]; - maxPwrT4[i] = data_def[idxL].pwrPdg[i][4]; + maxPwrT4[i] = data_def[idxL].pwrPdg[i][intercepts - 1]; ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i], data_def[idxL].pwrPdg[i], data_def[idxL].vpdPdg[i], -- cgit v1.2.3 From 362210e0dff4eb7bb36a9b34dbef3b39d779d95e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:18:37 +0100 Subject: ath9k: fix misleading indentation A cleanup patch in linux-3.18 moved around some code in the ath9k driver and left some code to be indented in a misleading way, made worse by the addition of some new code for p2p mode, as discovered by a new gcc-6 warning: drivers/net/wireless/ath/ath9k/init.c: In function 'ath9k_set_hw_capab': drivers/net/wireless/ath/ath9k/init.c:851:4: warning: statement is indented as if it were guarded by... [-Wmisleading-indentation] hw->wiphy->iface_combinations = if_comb; ^~ drivers/net/wireless/ath/ath9k/init.c:847:3: note: ...this 'if' clause, but it is not if (ath9k_is_chanctx_enabled()) ^~ The code is in fact correct, but the indentation is not, so I'm reformatting it as it should have been after the original cleanup. Signed-off-by: Arnd Bergmann Fixes: 499afaccf6f3 ("ath9k: Isolate ath9k_use_chanctx module parameter") Fixes: eb61f9f623f7 ("ath9k: advertise p2p dev support when chanctx") Signed-off-by: David S. 
Miller --- drivers/net/wireless/ath/ath9k/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index d4e0ac946c3a..1c226d63bb03 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -848,8 +848,8 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE); - hw->wiphy->iface_combinations = if_comb; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); + hw->wiphy->iface_combinations = if_comb; + hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); } hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; -- cgit v1.2.3 From efc21d95067f658a20e51e24c4c86d68f23b7f7f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 15:53:57 +0100 Subject: vmxnet3: fix lock imbalance in vmxnet3_tq_xmit() A recent bug fix rearranged the code in vmxnet3_tq_xmit() in a way that left the error handling for oversized headers unlock a lock that had not been taken yet. Gcc warns about the incorrect use of the 'flags' variable because of that: drivers/net/vmxnet3/vmxnet3_drv.c: In function 'vmxnet3_tq_xmit.constprop': include/linux/spinlock.h:246:3: error: 'flags' may be used uninitialized in this function [-Werror=maybe-uninitialized] This changes the error handling path to 'goto' the end of the function beyond the lock/unlock pair. Signed-off-by: Arnd Bergmann Fixes: cec05562fb1d ("vmxnet3: avoid calling pskb_may_pull with interrupts disabled") Signed-off-by: David S. Miller --- drivers/net/vmxnet3/vmxnet3_drv.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index fc895d0e85d9..b2348f67b00a 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1022,14 +1022,16 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, if (ctx.mss) { if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size > VMXNET3_MAX_TX_BUF_SIZE)) { - goto hdr_too_big; + tq->stats.drop_oversized_hdr++; + goto drop_pkt; } } else { if (skb->ip_summed == CHECKSUM_PARTIAL) { if (unlikely(ctx.eth_ip_hdr_size + skb->csum_offset > VMXNET3_MAX_CSUM_OFFSET)) { - goto hdr_too_big; + tq->stats.drop_oversized_hdr++; + goto drop_pkt; } } } @@ -1123,8 +1125,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, return NETDEV_TX_OK; -hdr_too_big: - tq->stats.drop_oversized_hdr++; unlock_drop_pkt: spin_unlock_irqrestore(&tq->tx_lock, flags); drop_pkt: -- cgit v1.2.3 From 7c82a7b998c6c6a2cb8a42aeb1d5d3450e51afb3 Mon Sep 17 00:00:00 2001 From: Igal Liberman Date: Sun, 13 Mar 2016 21:14:43 +0200 Subject: fsl/fman: fix dtsec_set_tx_pause_frames Fix a bug introduced in e06a03b (fsl/fman: fix the pause_time test) When pause_time is set to '0' - pause frames are disabled and there's no need to apply dTSEC-A003 Errata workaround. Signed-off-by: Igal Liberman Signed-off-by: David S. 
Miller --- drivers/net/ethernet/freescale/fman/fman_dtsec.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c index 7c92eb854925..c88918c4c5f3 100644 --- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c +++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c @@ -932,15 +932,14 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, if (!is_init_done(dtsec->dtsec_drv_param)) return -EINVAL; - /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ - if (dtsec->fm_rev_info.major == 2) - if (pause_time <= 320) { + if (pause_time) { + /* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */ + if (dtsec->fm_rev_info.major == 2 && pause_time <= 320) { pr_warn("pause-time: %d illegal.Should be > 320\n", pause_time); return -EINVAL; } - if (pause_time) { ptv = ioread32be(®s->ptv); ptv &= PTV_PTE_MASK; ptv |= pause_time & PTV_PT_MASK; -- cgit v1.2.3 From 0df83e7a6b58674683677f572e4e2361b2a69eea Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Fri, 11 Mar 2016 10:10:23 +0100 Subject: net: mvneta: Add missing hotplug notifier transition The mvneta_percpu_notifier() hotplug callback lacks handling of the CPU_DOWN_FAILED case. That means, if CPU_DOWN_PREPARE failes, the driver is not well configured on the CPU. Add handling for CPU_DOWN_FAILED[_FROZEN] hotplug notifier transition to setup the driver. Cc: Thomas Petazzoni Cc: netdev@vger.kernel.org Signed-off-by: Anna-Maria Gleixner Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 3d8e7d357ec9..be92668faf3e 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3328,6 +3328,8 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: case CPU_ONLINE_FROZEN: + case CPU_DOWN_FAILED: + case CPU_DOWN_FAILED_FROZEN: spin_lock(&pp->lock); /* Configuring the driver for a new CPU while the * driver is stopping is racy, so just avoid it. -- cgit v1.2.3 From 5fc7cf179449502ad4ad67845ded2df94b680de2 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 11 Mar 2016 09:53:09 -0800 Subject: net: thunderx: Cleanup PHY probing code. Remove the call to force the octeon-mdio driver to be loaded. Allow the standard driver loading mechanisms to load the PHY drivers, and use -EPROBE_DEFER to cause the BGX driver to be probed only after the PHY drivers are available. Reorder the setting of MAC addresses and PHY probing to allow BGX LMACs with no attached PHY to still be assigned a MAC address. Signed-off-by: David Daney Signed-off-by: David S. 
Miller --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 29 ++++++++++++++--------- 1 file changed, 18 insertions(+), 11 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index f8abdffdd851..feed2318201b 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -978,27 +978,37 @@ static int bgx_init_of_phy(struct bgx *bgx) const char *mac; device_for_each_child_node(&bgx->pdev->dev, fwn) { + struct phy_device *pd; struct device_node *phy_np; struct device_node *node = to_of_node(fwn); - /* If it is not an OF node we cannot handle it yet, so - * exit the loop. + /* Should always be an OF node. But if it is not, we + * cannot handle it, so exit the loop. */ if (!node) break; - phy_np = of_parse_phandle(node, "phy-handle", 0); - if (!phy_np) - continue; - - bgx->lmac[lmac].phydev = of_phy_find_device(phy_np); - mac = of_get_mac_address(node); if (mac) ether_addr_copy(bgx->lmac[lmac].mac, mac); SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; + + phy_np = of_parse_phandle(node, "phy-handle", 0); + /* If there is no phy or defective firmware presents + * this cortina phy, for which there is no driver + * support, ignore it. + */ + if (phy_np && + !of_device_is_compatible(phy_np, "cortina,cs4223-slice")) { + /* Wait until the phy drivers are available */ + pd = of_phy_find_device(phy_np); + if (!pd) + return -EPROBE_DEFER; + bgx->lmac[lmac].phydev = pd; + } + lmac++; if (lmac == MAX_LMAC_PER_BGX) { of_node_put(node); @@ -1032,9 +1042,6 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) struct bgx *bgx = NULL; u8 lmac; - /* Load octeon mdio driver */ - octeon_mdiobus_force_mod_depencency(); - bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL); if (!bgx) return -ENOMEM; -- cgit v1.2.3 From 1eefee901fca0208b8a56f20cdc134e2b8638ae7 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 11 Mar 2016 09:53:10 -0800 Subject: phy: mdio-octeon: Refactor into two files/modules A follow-on patch uses PCI probing to find the Thunder MDIO hardware. In preparation for this, split out the common code into a new file mdio-cavium.c, which will be used by both the existing OCTEON driver, and the new Thunder PCI based driver. As part of the refactoring simplify the struct cavium_mdiobus by removing fields that are only ever used in the probe function and can just as well be local variables. Use readq/writeq in preference to readq_relaxed/writeq_relaxed as the relaxed form was an optimization for an early chip revision, and the MDIO drivers are not performance bottlenecks that need optimization in the first place. Signed-off-by: David Daney Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 11 +- drivers/net/phy/Makefile | 1 + drivers/net/phy/mdio-cavium.c | 149 ++++++++++++++++++++++ drivers/net/phy/mdio-cavium.h | 119 ++++++++++++++++++ drivers/net/phy/mdio-octeon.c | 280 +++--------------------------------------- 5 files changed, 292 insertions(+), 268 deletions(-) create mode 100644 drivers/net/phy/mdio-cavium.c create mode 100644 drivers/net/phy/mdio-cavium.h (limited to 'drivers') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index f0a77020037a..40faec9f3b0b 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -183,15 +183,18 @@ config MDIO_GPIO To compile this driver as a module, choose M here: the module will be called mdio-gpio. 
+config MDIO_CAVIUM + tristate + config MDIO_OCTEON - tristate "Support for MDIO buses on Octeon and ThunderX SOCs" + tristate "Support for MDIO buses on Octeon and some ThunderX SOCs" depends on 64BIT depends on HAS_IOMEM + select MDIO_CAVIUM help - This module provides a driver for the Octeon and ThunderX MDIO - busses. It is required by the Octeon and ThunderX ethernet device - drivers. + buses. It is required by the Octeon and ThunderX ethernet device + drivers on some systems. config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 680e88f9915a..041b3d977d31 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o +obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_AMD_PHY) += amd.o diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/phy/mdio-cavium.c new file mode 100644 index 000000000000..e796ee121eac --- /dev/null +++ b/drivers/net/phy/mdio-cavium.c @@ -0,0 +1,149 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009-2016 Cavium, Inc. + */ + +#include +#include +#include +#include + +#include "mdio-cavium.h" + +static void cavium_mdiobus_set_mode(struct cavium_mdiobus *p, + enum cavium_mdiobus_mode m) +{ + union cvmx_smix_clk smi_clk; + + if (m == p->mode) + return; + + smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK); + smi_clk.s.mode = (m == C45) ? 1 : 0; + smi_clk.s.preamble = 1; + oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK); + p->mode = m; +} + +static int cavium_mdiobus_c45_addr(struct cavium_mdiobus *p, + int phy_id, int regnum) +{ + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + int timeout = 1000; + + cavium_mdiobus_set_mode(p, C45); + + smi_wr.u64 = 0; + smi_wr.s.dat = regnum & 0xffff; + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); + + regnum = (regnum >> 16) & 0x1f; + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */ + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); + + do { + /* Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. + */ + __delay(1000); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); + } while (smi_wr.s.pending && --timeout); + + if (timeout <= 0) + return -EIO; + return 0; +} + +int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) +{ + struct cavium_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_rd_dat smi_rd; + unsigned int op = 1; /* MDIO_CLAUSE_22_READ */ + int timeout = 1000; + + if (regnum & MII_ADDR_C45) { + int r = cavium_mdiobus_c45_addr(p, phy_id, regnum); + + if (r < 0) + return r; + + regnum = (regnum >> 16) & 0x1f; + op = 3; /* MDIO_CLAUSE_45_READ */ + } else { + cavium_mdiobus_set_mode(p, C22); + } + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = op; + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); + + do { + /* Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. 
+ */ + __delay(1000); + smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); + } while (smi_rd.s.pending && --timeout); + + if (smi_rd.s.val) + return smi_rd.s.dat; + else + return -EIO; +} +EXPORT_SYMBOL(cavium_mdiobus_read); + +int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val) +{ + struct cavium_mdiobus *p = bus->priv; + union cvmx_smix_cmd smi_cmd; + union cvmx_smix_wr_dat smi_wr; + unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */ + int timeout = 1000; + + if (regnum & MII_ADDR_C45) { + int r = cavium_mdiobus_c45_addr(p, phy_id, regnum); + + if (r < 0) + return r; + + regnum = (regnum >> 16) & 0x1f; + op = 1; /* MDIO_CLAUSE_45_WRITE */ + } else { + cavium_mdiobus_set_mode(p, C22); + } + + smi_wr.u64 = 0; + smi_wr.s.dat = val; + oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); + + smi_cmd.u64 = 0; + smi_cmd.s.phy_op = op; + smi_cmd.s.phy_adr = phy_id; + smi_cmd.s.reg_adr = regnum; + oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); + + do { + /* Wait 1000 clocks so we don't saturate the RSL bus + * doing reads. + */ + __delay(1000); + smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); + } while (smi_wr.s.pending && --timeout); + + if (timeout <= 0) + return -EIO; + + return 0; +} +EXPORT_SYMBOL(cavium_mdiobus_write); diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/phy/mdio-cavium.h new file mode 100644 index 000000000000..4bccd45d24e2 --- /dev/null +++ b/drivers/net/phy/mdio-cavium.h @@ -0,0 +1,119 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009-2016 Cavium, Inc. + */ + +enum cavium_mdiobus_mode { + UNINIT = 0, + C22, + C45 +}; + +#define SMI_CMD 0x0 +#define SMI_WR_DAT 0x8 +#define SMI_RD_DAT 0x10 +#define SMI_CLK 0x18 +#define SMI_EN 0x20 + +#ifdef __BIG_ENDIAN_BITFIELD +#define OCT_MDIO_BITFIELD_FIELD(field, more) \ + field; \ + more + +#else +#define OCT_MDIO_BITFIELD_FIELD(field, more) \ + more \ + field; + +#endif + +union cvmx_smix_clk { + u64 u64; + struct cvmx_smix_clk_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39, + OCT_MDIO_BITFIELD_FIELD(u64 mode:1, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3, + OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5, + OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1, + OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1, + OCT_MDIO_BITFIELD_FIELD(u64 preamble:1, + OCT_MDIO_BITFIELD_FIELD(u64 sample:4, + OCT_MDIO_BITFIELD_FIELD(u64 phase:8, + ;)))))))))) + } s; +}; + +union cvmx_smix_cmd { + u64 u64; + struct cvmx_smix_cmd_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3, + OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5, + OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3, + OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5, + ;)))))) + } s; +}; + +union cvmx_smix_en { + u64 u64; + struct cvmx_smix_en_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63, + OCT_MDIO_BITFIELD_FIELD(u64 en:1, + ;)) + } s; +}; + +union cvmx_smix_rd_dat { + u64 u64; + struct cvmx_smix_rd_dat_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 pending:1, + OCT_MDIO_BITFIELD_FIELD(u64 val:1, + OCT_MDIO_BITFIELD_FIELD(u64 dat:16, + ;)))) + } s; +}; + +union cvmx_smix_wr_dat { + u64 u64; + struct cvmx_smix_wr_dat_s { + OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, + OCT_MDIO_BITFIELD_FIELD(u64 pending:1, + 
OCT_MDIO_BITFIELD_FIELD(u64 val:1, + OCT_MDIO_BITFIELD_FIELD(u64 dat:16, + ;)))) + } s; +}; + +struct cavium_mdiobus { + struct mii_bus *mii_bus; + u64 register_base; + enum cavium_mdiobus_mode mode; +}; + +#ifdef CONFIG_CAVIUM_OCTEON_SOC + +#include + +static inline void oct_mdio_writeq(u64 val, u64 addr) +{ + cvmx_write_csr(addr, val); +} + +static inline u64 oct_mdio_readq(u64 addr) +{ + return cvmx_read_csr(addr); +} +#else +#define oct_mdio_writeq(val, addr) writeq(val, (void *)addr) +#define oct_mdio_readq(addr) readq((void *)addr) +#endif + +int cavium_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum); +int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val); diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c index 47d4f2f263d1..ab6914f8bd50 100644 --- a/drivers/net/phy/mdio-octeon.c +++ b/drivers/net/phy/mdio-octeon.c @@ -3,272 +3,26 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2009-2012 Cavium, Inc. + * Copyright (C) 2009-2015 Cavium, Inc. */ #include #include #include -#include #include #include #include #include -#ifdef CONFIG_CAVIUM_OCTEON_SOC -#include -#endif - -#define DRV_VERSION "1.1" -#define DRV_DESCRIPTION "Cavium Networks Octeon/ThunderX SMI/MDIO driver" - -#define SMI_CMD 0x0 -#define SMI_WR_DAT 0x8 -#define SMI_RD_DAT 0x10 -#define SMI_CLK 0x18 -#define SMI_EN 0x20 - -#ifdef __BIG_ENDIAN_BITFIELD -#define OCT_MDIO_BITFIELD_FIELD(field, more) \ - field; \ - more - -#else -#define OCT_MDIO_BITFIELD_FIELD(field, more) \ - more \ - field; - -#endif - -union cvmx_smix_clk { - u64 u64; - struct cvmx_smix_clk_s { - OCT_MDIO_BITFIELD_FIELD(u64 reserved_25_63:39, - OCT_MDIO_BITFIELD_FIELD(u64 mode:1, - OCT_MDIO_BITFIELD_FIELD(u64 reserved_21_23:3, - OCT_MDIO_BITFIELD_FIELD(u64 sample_hi:5, - OCT_MDIO_BITFIELD_FIELD(u64 sample_mode:1, - OCT_MDIO_BITFIELD_FIELD(u64 reserved_14_14:1, - OCT_MDIO_BITFIELD_FIELD(u64 clk_idle:1, - OCT_MDIO_BITFIELD_FIELD(u64 preamble:1, - OCT_MDIO_BITFIELD_FIELD(u64 sample:4, - OCT_MDIO_BITFIELD_FIELD(u64 phase:8, - ;)))))))))) - } s; -}; - -union cvmx_smix_cmd { - u64 u64; - struct cvmx_smix_cmd_s { - OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, - OCT_MDIO_BITFIELD_FIELD(u64 phy_op:2, - OCT_MDIO_BITFIELD_FIELD(u64 reserved_13_15:3, - OCT_MDIO_BITFIELD_FIELD(u64 phy_adr:5, - OCT_MDIO_BITFIELD_FIELD(u64 reserved_5_7:3, - OCT_MDIO_BITFIELD_FIELD(u64 reg_adr:5, - ;)))))) - } s; -}; - -union cvmx_smix_en { - u64 u64; - struct cvmx_smix_en_s { - OCT_MDIO_BITFIELD_FIELD(u64 reserved_1_63:63, - OCT_MDIO_BITFIELD_FIELD(u64 en:1, - ;)) - } s; -}; - -union cvmx_smix_rd_dat { - u64 u64; - struct cvmx_smix_rd_dat_s { - OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, - OCT_MDIO_BITFIELD_FIELD(u64 pending:1, - OCT_MDIO_BITFIELD_FIELD(u64 val:1, - OCT_MDIO_BITFIELD_FIELD(u64 dat:16, - ;)))) - } s; -}; - -union cvmx_smix_wr_dat { - u64 u64; - struct cvmx_smix_wr_dat_s { - OCT_MDIO_BITFIELD_FIELD(u64 reserved_18_63:46, - OCT_MDIO_BITFIELD_FIELD(u64 pending:1, - OCT_MDIO_BITFIELD_FIELD(u64 val:1, - OCT_MDIO_BITFIELD_FIELD(u64 dat:16, - ;)))) - } s; -}; - -enum octeon_mdiobus_mode { - UNINIT = 0, - C22, - C45 -}; - -struct octeon_mdiobus { - struct mii_bus *mii_bus; - u64 register_base; - resource_size_t mdio_phys; - resource_size_t regsize; - enum octeon_mdiobus_mode mode; -}; - -#ifdef CONFIG_CAVIUM_OCTEON_SOC -static void oct_mdio_writeq(u64 val, u64 addr) -{ - cvmx_write_csr(addr, val); -} - -static u64 oct_mdio_readq(u64 addr) -{ - return 
cvmx_read_csr(addr); -} -#else -#define oct_mdio_writeq(val, addr) writeq_relaxed(val, (void *)addr) -#define oct_mdio_readq(addr) readq_relaxed((void *)addr) -#endif - -static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p, - enum octeon_mdiobus_mode m) -{ - union cvmx_smix_clk smi_clk; - - if (m == p->mode) - return; - - smi_clk.u64 = oct_mdio_readq(p->register_base + SMI_CLK); - smi_clk.s.mode = (m == C45) ? 1 : 0; - smi_clk.s.preamble = 1; - oct_mdio_writeq(smi_clk.u64, p->register_base + SMI_CLK); - p->mode = m; -} - -static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p, - int phy_id, int regnum) -{ - union cvmx_smix_cmd smi_cmd; - union cvmx_smix_wr_dat smi_wr; - int timeout = 1000; - - octeon_mdiobus_set_mode(p, C45); - - smi_wr.u64 = 0; - smi_wr.s.dat = regnum & 0xffff; - oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); - - regnum = (regnum >> 16) & 0x1f; - - smi_cmd.u64 = 0; - smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */ - smi_cmd.s.phy_adr = phy_id; - smi_cmd.s.reg_adr = regnum; - oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); - - do { - /* Wait 1000 clocks so we don't saturate the RSL bus - * doing reads. - */ - __delay(1000); - smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); - } while (smi_wr.s.pending && --timeout); - - if (timeout <= 0) - return -EIO; - return 0; -} - -static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) -{ - struct octeon_mdiobus *p = bus->priv; - union cvmx_smix_cmd smi_cmd; - union cvmx_smix_rd_dat smi_rd; - unsigned int op = 1; /* MDIO_CLAUSE_22_READ */ - int timeout = 1000; - - if (regnum & MII_ADDR_C45) { - int r = octeon_mdiobus_c45_addr(p, phy_id, regnum); - if (r < 0) - return r; - - regnum = (regnum >> 16) & 0x1f; - op = 3; /* MDIO_CLAUSE_45_READ */ - } else { - octeon_mdiobus_set_mode(p, C22); - } - - - smi_cmd.u64 = 0; - smi_cmd.s.phy_op = op; - smi_cmd.s.phy_adr = phy_id; - smi_cmd.s.reg_adr = regnum; - oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); - - do { - /* Wait 1000 clocks so we don't saturate the RSL bus - * doing reads. - */ - __delay(1000); - smi_rd.u64 = oct_mdio_readq(p->register_base + SMI_RD_DAT); - } while (smi_rd.s.pending && --timeout); - - if (smi_rd.s.val) - return smi_rd.s.dat; - else - return -EIO; -} - -static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id, - int regnum, u16 val) -{ - struct octeon_mdiobus *p = bus->priv; - union cvmx_smix_cmd smi_cmd; - union cvmx_smix_wr_dat smi_wr; - unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */ - int timeout = 1000; - - - if (regnum & MII_ADDR_C45) { - int r = octeon_mdiobus_c45_addr(p, phy_id, regnum); - if (r < 0) - return r; - - regnum = (regnum >> 16) & 0x1f; - op = 1; /* MDIO_CLAUSE_45_WRITE */ - } else { - octeon_mdiobus_set_mode(p, C22); - } - - smi_wr.u64 = 0; - smi_wr.s.dat = val; - oct_mdio_writeq(smi_wr.u64, p->register_base + SMI_WR_DAT); - - smi_cmd.u64 = 0; - smi_cmd.s.phy_op = op; - smi_cmd.s.phy_adr = phy_id; - smi_cmd.s.reg_adr = regnum; - oct_mdio_writeq(smi_cmd.u64, p->register_base + SMI_CMD); - - do { - /* Wait 1000 clocks so we don't saturate the RSL bus - * doing reads. 
- */ - __delay(1000); - smi_wr.u64 = oct_mdio_readq(p->register_base + SMI_WR_DAT); - } while (smi_wr.s.pending && --timeout); - - if (timeout <= 0) - return -EIO; - - return 0; -} +#include "mdio-cavium.h" static int octeon_mdiobus_probe(struct platform_device *pdev) { - struct octeon_mdiobus *bus; + struct cavium_mdiobus *bus; struct mii_bus *mii_bus; struct resource *res_mem; + resource_size_t mdio_phys; + resource_size_t regsize; union cvmx_smix_en smi_en; int err = -ENOENT; @@ -284,17 +38,17 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) bus = mii_bus->priv; bus->mii_bus = mii_bus; - bus->mdio_phys = res_mem->start; - bus->regsize = resource_size(res_mem); + mdio_phys = res_mem->start; + regsize = resource_size(res_mem); - if (!devm_request_mem_region(&pdev->dev, bus->mdio_phys, bus->regsize, + if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize, res_mem->name)) { dev_err(&pdev->dev, "request_mem_region failed\n"); return -ENXIO; } bus->register_base = - (u64)devm_ioremap(&pdev->dev, bus->mdio_phys, bus->regsize); + (u64)devm_ioremap(&pdev->dev, mdio_phys, regsize); if (!bus->register_base) { dev_err(&pdev->dev, "dev_ioremap failed\n"); return -ENOMEM; @@ -304,13 +58,12 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) smi_en.s.en = 1; oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); - bus->mii_bus->priv = bus; - bus->mii_bus->name = "mdio-octeon"; + bus->mii_bus->name = KBUILD_MODNAME; snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", bus->register_base); bus->mii_bus->parent = &pdev->dev; - bus->mii_bus->read = octeon_mdiobus_read; - bus->mii_bus->write = octeon_mdiobus_write; + bus->mii_bus->read = cavium_mdiobus_read; + bus->mii_bus->write = cavium_mdiobus_write; platform_set_drvdata(pdev, bus); @@ -318,7 +71,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev) if (err) goto fail_register; - dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + dev_info(&pdev->dev, "Probed\n"); return 0; fail_register: @@ -330,7 +83,7 @@ fail_register: static int octeon_mdiobus_remove(struct platform_device *pdev) { - struct octeon_mdiobus *bus; + struct cavium_mdiobus *bus; union cvmx_smix_en smi_en; bus = platform_get_drvdata(pdev); @@ -352,7 +105,7 @@ MODULE_DEVICE_TABLE(of, octeon_mdiobus_match); static struct platform_driver octeon_mdiobus_driver = { .driver = { - .name = "mdio-octeon", + .name = KBUILD_MODNAME, .of_match_table = octeon_mdiobus_match, }, .probe = octeon_mdiobus_probe, @@ -367,7 +120,6 @@ EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency); module_platform_driver(octeon_mdiobus_driver); -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_VERSION(DRV_VERSION); +MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver"); MODULE_AUTHOR("David Daney"); MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 379d7ac7ca31722a1fb488ae3e98b274c9db568c Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 11 Mar 2016 09:53:11 -0800 Subject: phy: mdio-thunder: Add driver for Cavium Thunder SoC MDIO buses. The Cavium Thunder SoCs have multiple MIDO buses that are part of a single PCI device. To model this in the device tree we call the PCI parent device a "cavium,thunder-8890-mdio-nexus", it has several children, one for each MDIO bus. The MDIO bus hardware is identical to that found in the OCTEON SoCs, so we use that code for things that are not part of the PCI driver probe/remove Signed-off-by: David Daney Signed-off-by: David S. 
Miller --- .../devicetree/bindings/net/cavium-mdio.txt | 61 +++++++- drivers/net/phy/Kconfig | 11 ++ drivers/net/phy/Makefile | 1 + drivers/net/phy/mdio-thunder.c | 154 +++++++++++++++++++++ 4 files changed, 225 insertions(+), 2 deletions(-) create mode 100644 drivers/net/phy/mdio-thunder.c (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/net/cavium-mdio.txt b/Documentation/devicetree/bindings/net/cavium-mdio.txt index 04cb7491d232..020df08b8a30 100644 --- a/Documentation/devicetree/bindings/net/cavium-mdio.txt +++ b/Documentation/devicetree/bindings/net/cavium-mdio.txt @@ -1,9 +1,12 @@ * System Management Interface (SMI) / MDIO Properties: -- compatible: "cavium,octeon-3860-mdio" +- compatible: One of: - Compatibility with all cn3XXX, cn5XXX and cn6XXX SOCs. + "cavium,octeon-3860-mdio": Compatibility with all cn3XXX, cn5XXX + and cn6XXX SOCs. + + "cavium,thunder-8890-mdio": Compatibility with all cn8XXX SOCs. - reg: The base address of the MDIO bus controller register bank. @@ -25,3 +28,57 @@ Example: reg = <0>; }; }; + + +* System Management Interface (SMI) / MDIO Nexus + + Several mdio buses may be gathered as children of a single PCI + device, this PCI device is the nexus of the buses. + +Properties: + +- compatible: "cavium,thunder-8890-mdio-nexus"; + +- reg: The PCI device and function numbers of the nexus device. + +- #address-cells: Must be <2>. + +- #size-cells: Must be <2>. + +- ranges: As needed for mapping of the MDIO bus device registers. + +- assigned-addresses: As needed for mapping of the MDIO bus device registers. + +Example: + + mdio-nexus@1,3 { + compatible = "cavium,thunder-8890-mdio-nexus"; + #address-cells = <2>; + #size-cells = <2>; + reg = <0x0b00 0 0 0 0>; /* DEVFN = 0x0b (1:3) */ + assigned-addresses = <0x03000000 0x87e0 0x05000000 0x0 0x800000>; + ranges = <0x87e0 0x05000000 0x03000000 0x87e0 0x05000000 0x0 0x800000>; + + mdio0@87e0,05003800 { + compatible = "cavium,thunder-8890-mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x87e0 0x05003800 0x0 0x30>; + + ethernet-phy@0 { + ... + reg = <0>; + }; + }; + mdio0@87e0,05003880 { + compatible = "cavium,thunder-8890-mdio"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x87e0 0x05003880 0x0 0x30>; + + ethernet-phy@0 { + ... + reg = <0>; + }; + }; + }; diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 40faec9f3b0b..075a4cc175b1 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -196,6 +196,17 @@ config MDIO_OCTEON buses. It is required by the Octeon and ThunderX ethernet device drivers on some systems. +config MDIO_THUNDER + tristate "Support for MDIO buses on on ThunderX SOCs" + depends on 64BIT + depends on PCI + select MDIO_CAVIUM + help + This driver supports the MDIO interfaces found on Cavium + ThunderX SoCs when the MDIO bus device appears on as a PCI + device. 
+ + config MDIO_SUN4I tristate "Allwinner sun4i MDIO interface support" depends on ARCH_SUNXI diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 041b3d977d31..fcdbb9299fab 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -31,6 +31,7 @@ obj-$(CONFIG_DP83867_PHY) += dp83867.o obj-$(CONFIG_STE10XP) += ste10Xp.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o +obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_AT803X_PHY) += at803x.o diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c new file mode 100644 index 000000000000..564616968cad --- /dev/null +++ b/drivers/net/phy/mdio-thunder.c @@ -0,0 +1,154 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009-2016 Cavium, Inc. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mdio-cavium.h" + +struct thunder_mdiobus_nexus { + void __iomem *bar0; + struct cavium_mdiobus *buses[4]; +}; + +static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device_node *node; + struct fwnode_handle *fwn; + struct thunder_mdiobus_nexus *nexus; + int err; + int i; + + nexus = devm_kzalloc(&pdev->dev, sizeof(*nexus), GFP_KERNEL); + if (!nexus) + return -ENOMEM; + + pci_set_drvdata(pdev, nexus); + + err = pcim_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + pci_set_drvdata(pdev, NULL); + return err; + } + + err = pci_request_regions(pdev, KBUILD_MODNAME); + if (err) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + goto err_disable_device; + } + + nexus->bar0 = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); + if (!nexus->bar0) { + err = -ENOMEM; + goto err_release_regions; + } + + i = 0; + device_for_each_child_node(&pdev->dev, fwn) { + struct resource r; + struct mii_bus *mii_bus; + struct cavium_mdiobus *bus; + union cvmx_smix_en smi_en; + + /* If it is not an OF node we cannot handle it yet, so + * exit the loop. 
+ */ + node = to_of_node(fwn); + if (!node) + break; + + err = of_address_to_resource(node, 0, &r); + if (err) { + dev_err(&pdev->dev, + "Couldn't translate address for \"%s\"\n", + node->name); + break; + } + + mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus)); + if (!mii_bus) + break; + bus = mii_bus->priv; + bus->mii_bus = mii_bus; + + nexus->buses[i] = bus; + i++; + + bus->register_base = (u64)nexus->bar0 + + r.start - pci_resource_start(pdev, 0); + + smi_en.u64 = 0; + smi_en.s.en = 1; + oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN); + bus->mii_bus->name = KBUILD_MODNAME; + snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%llx", r.start); + bus->mii_bus->parent = &pdev->dev; + bus->mii_bus->read = cavium_mdiobus_read; + bus->mii_bus->write = cavium_mdiobus_write; + + err = of_mdiobus_register(bus->mii_bus, node); + if (err) + dev_err(&pdev->dev, "of_mdiobus_register failed\n"); + + dev_info(&pdev->dev, "Added bus at %llx\n", r.start); + if (i >= ARRAY_SIZE(nexus->buses)) + break; + } + return 0; + +err_release_regions: + pci_release_regions(pdev); + +err_disable_device: + pci_set_drvdata(pdev, NULL); + return err; +} + +static void thunder_mdiobus_pci_remove(struct pci_dev *pdev) +{ + int i; + struct thunder_mdiobus_nexus *nexus = pci_get_drvdata(pdev); + + for (i = 0; i < ARRAY_SIZE(nexus->buses); i++) { + struct cavium_mdiobus *bus = nexus->buses[i]; + + if (!bus) + continue; + + mdiobus_unregister(bus->mii_bus); + mdiobus_free(bus->mii_bus); + oct_mdio_writeq(0, bus->register_base + SMI_EN); + } + pci_set_drvdata(pdev, NULL); +} + +static const struct pci_device_id thunder_mdiobus_id_table[] = { + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, 0xa02b) }, + { 0, } /* End of table. */ +}; +MODULE_DEVICE_TABLE(pci, thunder_mdiobus_id_table); + +static struct pci_driver thunder_mdiobus_driver = { + .name = KBUILD_MODNAME, + .id_table = thunder_mdiobus_id_table, + .probe = thunder_mdiobus_pci_probe, + .remove = thunder_mdiobus_pci_remove, +}; + +module_pci_driver(thunder_mdiobus_driver); + +MODULE_DESCRIPTION("Cavium ThunderX MDIO bus driver"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From a3478bae7a2751500386816ca567af4fc0f5eb10 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Mar 2016 00:33:13 +0300 Subject: of_mdio: mdio_device_create() never returns NULL mdio_device_create() never returns NULL, thus checking for it in of_mdiobus_register_device() is pointless... Suggested-by: Vladimir Zapolskiy Signed-off-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 5e7838290998..3bc68acac64c 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -98,7 +98,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio, int rc; mdiodev = mdio_device_create(mdio, addr); - if (!mdiodev || IS_ERR(mdiodev)) + if (IS_ERR(mdiodev)) return 1; /* Associate the OF node with the device structure so it -- cgit v1.2.3 From ac044b902e50ba50713e257e331bc0e0c1c7d4d9 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Mar 2016 00:34:02 +0300 Subject: of_mdio: use IS_ERR_OR_NULL() IS_ERR_OR_NULL() is open coded in of_mdiobus_register_phy(), so just call it directly... Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/of/of_mdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 3bc68acac64c..52d9b14e1717 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -56,7 +56,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi phy = phy_device_create(mdio, addr, phy_id, 0, NULL); else phy = get_phy_device(mdio, addr, is_c45); - if (!phy || IS_ERR(phy)) + if (IS_ERR_OR_NULL(phy)) return 1; rc = irq_of_parse_and_map(child, 0); -- cgit v1.2.3 From 5189b1d82f1ce6c2749fa3499d28ffd3f5075543 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Mar 2016 00:34:58 +0300 Subject: of_mdio: use PTR_ERR_OR_ZERO() PTR_ERR_OR_ZERO() is open coded in of_phy_register_fixed_link(), so just call it directly... Signed-off-by: Sergei Shtylyov Reviewed-by: Florian Fainelli Reviewed-by: Vladimir Zapolskiy Signed-off-by: David S. Miller --- drivers/of/of_mdio.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 52d9b14e1717..8453f08d2ef4 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -412,7 +412,7 @@ int of_phy_register_fixed_link(struct device_node *np) if (strcmp(managed, "in-band-status") == 0) { /* status is zeroed, namely its .link member */ phy = fixed_phy_register(PHY_POLL, &status, -1, np); - return IS_ERR(phy) ? PTR_ERR(phy) : 0; + return PTR_ERR_OR_ZERO(phy); } } @@ -434,7 +434,7 @@ int of_phy_register_fixed_link(struct device_node *np) return -EPROBE_DEFER; phy = fixed_phy_register(PHY_POLL, &status, link_gpio, np); - return IS_ERR(phy) ? PTR_ERR(phy) : 0; + return PTR_ERR_OR_ZERO(phy); } /* Old binding */ @@ -446,7 +446,7 @@ int of_phy_register_fixed_link(struct device_node *np) status.pause = be32_to_cpu(fixed_link_prop[3]); status.asym_pause = be32_to_cpu(fixed_link_prop[4]); phy = fixed_phy_register(PHY_POLL, &status, -1, np); - return IS_ERR(phy) ? PTR_ERR(phy) : 0; + return PTR_ERR_OR_ZERO(phy); } return -ENODEV; -- cgit v1.2.3 From ca3dfa51e67cd41f6514b84a88bc101e8b1a139a Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: Sat, 12 Mar 2016 00:01:36 +0100 Subject: dsa: Rename mv88e6123_61_65 to mv88e6123 to be consistent All the drivers support multiple chips, but mv88e6123_61_65 is the only one that reflects this in its naming. Change it to be consistent with the other drivers. Signed-off-by: Andrew Lunn Reviewed-by: Vivien Didelot Signed-off-by: David S. 
Miller --- arch/arm/configs/multi_v5_defconfig | 2 +- arch/arm/configs/mvebu_v5_defconfig | 2 +- arch/arm/configs/orion5x_defconfig | 2 +- arch/tile/configs/tilegx_defconfig | 2 +- arch/tile/configs/tilepro_defconfig | 2 +- drivers/net/dsa/Kconfig | 2 +- drivers/net/dsa/Makefile | 4 +- drivers/net/dsa/mv88e6123.c | 124 ++++++++++++++++++++++++++++++++++++ drivers/net/dsa/mv88e6123_61_65.c | 124 ------------------------------------ drivers/net/dsa/mv88e6xxx.c | 8 +-- drivers/net/dsa/mv88e6xxx.h | 2 +- 11 files changed, 137 insertions(+), 137 deletions(-) create mode 100644 drivers/net/dsa/mv88e6123.c delete mode 100644 drivers/net/dsa/mv88e6123_61_65.c (limited to 'drivers') diff --git a/arch/arm/configs/multi_v5_defconfig b/arch/arm/configs/multi_v5_defconfig index 1f9ca4737ef6..afb1f62fb05e 100644 --- a/arch/arm/configs/multi_v5_defconfig +++ b/arch/arm/configs/multi_v5_defconfig @@ -91,7 +91,7 @@ CONFIG_SATA_MV=y CONFIG_NETDEVICES=y CONFIG_NET_DSA_MV88E6060=y CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y +CONFIG_NET_DSA_MV88E6123=y CONFIG_NET_DSA_MV88E6171=y CONFIG_NET_DSA_MV88E6352=y CONFIG_MV643XX_ETH=y diff --git a/arch/arm/configs/mvebu_v5_defconfig b/arch/arm/configs/mvebu_v5_defconfig index af29780accdc..6c4c54037bc4 100644 --- a/arch/arm/configs/mvebu_v5_defconfig +++ b/arch/arm/configs/mvebu_v5_defconfig @@ -92,7 +92,7 @@ CONFIG_SATA_MV=y CONFIG_NETDEVICES=y CONFIG_NET_DSA_MV88E6060=y CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y +CONFIG_NET_DSA_MV88E6123=y CONFIG_NET_DSA_MV88E6171=y CONFIG_NET_DSA_MV88E6352=y CONFIG_MV643XX_ETH=y diff --git a/arch/arm/configs/orion5x_defconfig b/arch/arm/configs/orion5x_defconfig index 5876ce7af130..6a5bc27538f1 100644 --- a/arch/arm/configs/orion5x_defconfig +++ b/arch/arm/configs/orion5x_defconfig @@ -86,7 +86,7 @@ CONFIG_SATA_MV=y CONFIG_NETDEVICES=y CONFIG_MII=y CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y +CONFIG_NET_DSA_MV88E6123=y CONFIG_MV643XX_ETH=y CONFIG_MARVELL_PHY=y # CONFIG_INPUT_MOUSEDEV is not set diff --git a/arch/tile/configs/tilegx_defconfig b/arch/tile/configs/tilegx_defconfig index 37dc9364c4a1..984fa00a8c25 100644 --- a/arch/tile/configs/tilegx_defconfig +++ b/arch/tile/configs/tilegx_defconfig @@ -222,7 +222,7 @@ CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y +CONFIG_NET_DSA_MV88E6123=y CONFIG_SKY2=y CONFIG_PTP_1588_CLOCK_TILEGX=y # CONFIG_WLAN is not set diff --git a/arch/tile/configs/tilepro_defconfig b/arch/tile/configs/tilepro_defconfig index 76a2781dec2c..71ad9f7e40c9 100644 --- a/arch/tile/configs/tilepro_defconfig +++ b/arch/tile/configs/tilepro_defconfig @@ -341,7 +341,7 @@ CONFIG_TUN=y CONFIG_VETH=m CONFIG_NET_DSA_MV88E6060=y CONFIG_NET_DSA_MV88E6131=y -CONFIG_NET_DSA_MV88E6123_61_65=y +CONFIG_NET_DSA_MV88E6123=y # CONFIG_NET_VENDOR_3COM is not set CONFIG_E1000E=y # CONFIG_WLAN is not set diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 4c483d937481..90ba003d8fdf 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -27,7 +27,7 @@ config NET_DSA_MV88E6131 This enables support for the Marvell 88E6085/6095/6095F/6131 ethernet switch chips. 
-config NET_DSA_MV88E6123_61_65 +config NET_DSA_MV88E6123 tristate "Marvell 88E6123/6161/6165 ethernet switch chip support" depends on NET_DSA select NET_DSA_MV88E6XXX diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile index e2d51c4b9382..a6e09939be65 100644 --- a/drivers/net/dsa/Makefile +++ b/drivers/net/dsa/Makefile @@ -1,8 +1,8 @@ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o mv88e6xxx_drv-y += mv88e6xxx.o -ifdef CONFIG_NET_DSA_MV88E6123_61_65 -mv88e6xxx_drv-y += mv88e6123_61_65.o +ifdef CONFIG_NET_DSA_MV88E6123 +mv88e6xxx_drv-y += mv88e6123.o endif ifdef CONFIG_NET_DSA_MV88E6131 mv88e6xxx_drv-y += mv88e6131.o diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c new file mode 100644 index 000000000000..69a6f79dcb10 --- /dev/null +++ b/drivers/net/dsa/mv88e6123.c @@ -0,0 +1,124 @@ +/* + * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support + * Copyright (c) 2008-2009 Marvell Semiconductor + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "mv88e6xxx.h" + +static const struct mv88e6xxx_switch_id mv88e6123_table[] = { + { PORT_SWITCH_ID_6123, "Marvell 88E6123" }, + { PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" }, + { PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" }, + { PORT_SWITCH_ID_6161, "Marvell 88E6161" }, + { PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" }, + { PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" }, + { PORT_SWITCH_ID_6165, "Marvell 88E6165" }, + { PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" }, + { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" }, +}; + +static char *mv88e6123_probe(struct device *host_dev, int sw_addr) +{ + return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table, + ARRAY_SIZE(mv88e6123_table)); +} + +static int mv88e6123_setup_global(struct dsa_switch *ds) +{ + u32 upstream_port = dsa_upstream_port(ds); + int ret; + u32 reg; + + ret = mv88e6xxx_setup_global(ds); + if (ret) + return ret; + + /* Disable the PHY polling unit (since there won't be any + * external PHYs to poll), don't discard packets with + * excessive collisions, and mask all interrupt sources. + */ + REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000); + + /* Configure the upstream port, and configure the upstream + * port as the port to which ingress and egress monitor frames + * are to be sent. + */ + reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | + upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; + REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); + + /* Disable remote management for now, and set the switch's + * DSA device number. 
+ */ + REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f); + + return 0; +} + +static int mv88e6123_setup(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + int ret; + + ret = mv88e6xxx_setup_common(ds); + if (ret < 0) + return ret; + + switch (ps->id) { + case PORT_SWITCH_ID_6123: + ps->num_ports = 3; + break; + case PORT_SWITCH_ID_6161: + case PORT_SWITCH_ID_6165: + ps->num_ports = 6; + break; + default: + return -ENODEV; + } + + ret = mv88e6xxx_switch_reset(ds, false); + if (ret < 0) + return ret; + + ret = mv88e6123_setup_global(ds); + if (ret < 0) + return ret; + + return mv88e6xxx_setup_ports(ds); +} + +struct dsa_switch_driver mv88e6123_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_EDSA, + .priv_size = sizeof(struct mv88e6xxx_priv_state), + .probe = mv88e6123_probe, + .setup = mv88e6123_setup, + .set_addr = mv88e6xxx_set_addr_indirect, + .phy_read = mv88e6xxx_phy_read, + .phy_write = mv88e6xxx_phy_write, + .get_strings = mv88e6xxx_get_strings, + .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, + .get_sset_count = mv88e6xxx_get_sset_count, + .adjust_link = mv88e6xxx_adjust_link, +#ifdef CONFIG_NET_DSA_HWMON + .get_temp = mv88e6xxx_get_temp, +#endif + .get_regs_len = mv88e6xxx_get_regs_len, + .get_regs = mv88e6xxx_get_regs, +}; + +MODULE_ALIAS("platform:mv88e6123"); +MODULE_ALIAS("platform:mv88e6161"); +MODULE_ALIAS("platform:mv88e6165"); diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c deleted file mode 100644 index d4fcf4570d95..000000000000 --- a/drivers/net/dsa/mv88e6123_61_65.c +++ /dev/null @@ -1,124 +0,0 @@ -/* - * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support - * Copyright (c) 2008-2009 Marvell Semiconductor - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include "mv88e6xxx.h" - -static const struct mv88e6xxx_switch_id mv88e6123_61_65_table[] = { - { PORT_SWITCH_ID_6123, "Marvell 88E6123" }, - { PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" }, - { PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" }, - { PORT_SWITCH_ID_6161, "Marvell 88E6161" }, - { PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" }, - { PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" }, - { PORT_SWITCH_ID_6165, "Marvell 88E6165" }, - { PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" }, - { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" }, -}; - -static char *mv88e6123_61_65_probe(struct device *host_dev, int sw_addr) -{ - return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_61_65_table, - ARRAY_SIZE(mv88e6123_61_65_table)); -} - -static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) -{ - u32 upstream_port = dsa_upstream_port(ds); - int ret; - u32 reg; - - ret = mv88e6xxx_setup_global(ds); - if (ret) - return ret; - - /* Disable the PHY polling unit (since there won't be any - * external PHYs to poll), don't discard packets with - * excessive collisions, and mask all interrupt sources. - */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000); - - /* Configure the upstream port, and configure the upstream - * port as the port to which ingress and egress monitor frames - * are to be sent. 
- */ - reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT | - upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT; - REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg); - - /* Disable remote management for now, and set the switch's - * DSA device number. - */ - REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f); - - return 0; -} - -static int mv88e6123_61_65_setup(struct dsa_switch *ds) -{ - struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); - int ret; - - ret = mv88e6xxx_setup_common(ds); - if (ret < 0) - return ret; - - switch (ps->id) { - case PORT_SWITCH_ID_6123: - ps->num_ports = 3; - break; - case PORT_SWITCH_ID_6161: - case PORT_SWITCH_ID_6165: - ps->num_ports = 6; - break; - default: - return -ENODEV; - } - - ret = mv88e6xxx_switch_reset(ds, false); - if (ret < 0) - return ret; - - ret = mv88e6123_61_65_setup_global(ds); - if (ret < 0) - return ret; - - return mv88e6xxx_setup_ports(ds); -} - -struct dsa_switch_driver mv88e6123_61_65_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_EDSA, - .priv_size = sizeof(struct mv88e6xxx_priv_state), - .probe = mv88e6123_61_65_probe, - .setup = mv88e6123_61_65_setup, - .set_addr = mv88e6xxx_set_addr_indirect, - .phy_read = mv88e6xxx_phy_read, - .phy_write = mv88e6xxx_phy_write, - .get_strings = mv88e6xxx_get_strings, - .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, - .get_sset_count = mv88e6xxx_get_sset_count, - .adjust_link = mv88e6xxx_adjust_link, -#ifdef CONFIG_NET_DSA_HWMON - .get_temp = mv88e6xxx_get_temp, -#endif - .get_regs_len = mv88e6xxx_get_regs_len, - .get_regs = mv88e6xxx_get_regs, -}; - -MODULE_ALIAS("platform:mv88e6123"); -MODULE_ALIAS("platform:mv88e6161"); -MODULE_ALIAS("platform:mv88e6165"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 5f07524083c3..5309c738ff00 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2989,8 +2989,8 @@ static int __init mv88e6xxx_init(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) register_switch_driver(&mv88e6131_switch_driver); #endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) - register_switch_driver(&mv88e6123_61_65_switch_driver); +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123) + register_switch_driver(&mv88e6123_switch_driver); #endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) register_switch_driver(&mv88e6352_switch_driver); @@ -3010,8 +3010,8 @@ static void __exit mv88e6xxx_cleanup(void) #if IS_ENABLED(CONFIG_NET_DSA_MV88E6352) unregister_switch_driver(&mv88e6352_switch_driver); #endif -#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65) - unregister_switch_driver(&mv88e6123_61_65_switch_driver); +#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123) + unregister_switch_driver(&mv88e6123_switch_driver); #endif #if IS_ENABLED(CONFIG_NET_DSA_MV88E6131) unregister_switch_driver(&mv88e6131_switch_driver); diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 3425616987ed..281cefe86afd 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -519,7 +519,7 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page, int reg, int val); extern struct dsa_switch_driver mv88e6131_switch_driver; -extern struct dsa_switch_driver mv88e6123_61_65_switch_driver; +extern struct dsa_switch_driver mv88e6123_switch_driver; extern struct dsa_switch_driver mv88e6352_switch_driver; extern struct dsa_switch_driver mv88e6171_switch_driver; -- cgit v1.2.3 From 5bcbe0f35fb13e31fdd0b2dc9695f19ab0208207 Mon Sep 17 00:00:00 2001 From: Andrew Lunn Date: 
Sat, 12 Mar 2016 00:01:40 +0100 Subject: phy: fixed: Fix removal of phys. The fixed phys delete function simply removed the fixed phy from the internal linked list and freed the memory. It however did not unregister the associated phy device. This meant it was still possible to find the phy device on the mdio bus. Make fixed_phy_del() an internal function and add a fixed_phy_unregister() to unregisters the phy device and then uses fixed_phy_del() to free resources. Modify DSA to use this new API function, so we don't leak phys. Signed-off-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/phy/fixed_phy.c | 11 +++++++++-- include/linux/phy_fixed.h | 5 ++--- net/dsa/dsa.c | 4 +--- 3 files changed, 12 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index ab9c473d75ea..fc07a8866020 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -285,7 +285,7 @@ err_regs: } EXPORT_SYMBOL_GPL(fixed_phy_add); -void fixed_phy_del(int phy_addr) +static void fixed_phy_del(int phy_addr) { struct fixed_mdio_bus *fmb = &platform_fmb; struct fixed_phy *fp, *tmp; @@ -300,7 +300,6 @@ void fixed_phy_del(int phy_addr) } } } -EXPORT_SYMBOL_GPL(fixed_phy_del); static int phy_fixed_addr; static DEFINE_SPINLOCK(phy_fixed_addr_lock); @@ -371,6 +370,14 @@ struct phy_device *fixed_phy_register(unsigned int irq, } EXPORT_SYMBOL_GPL(fixed_phy_register); +void fixed_phy_unregister(struct phy_device *phy) +{ + phy_device_remove(phy); + + fixed_phy_del(phy->mdio.addr); +} +EXPORT_SYMBOL_GPL(fixed_phy_unregister); + static int __init fixed_mdio_bus_init(void) { struct fixed_mdio_bus *fmb = &platform_fmb; diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 2400d2ea4f34..1d41ec44e39d 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h @@ -19,7 +19,7 @@ extern struct phy_device *fixed_phy_register(unsigned int irq, struct fixed_phy_status *status, int link_gpio, struct device_node *np); -extern void fixed_phy_del(int phy_addr); +extern void fixed_phy_unregister(struct phy_device *phydev); extern int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, struct fixed_phy_status *)); @@ -40,9 +40,8 @@ static inline struct phy_device *fixed_phy_register(unsigned int irq, { return ERR_PTR(-ENODEV); } -static inline int fixed_phy_del(int phy_addr) +static inline void fixed_phy_unregister(struct phy_device *phydev) { - return -ENODEV; } static inline int fixed_phy_set_link_update(struct phy_device *phydev, int (*link_update)(struct net_device *, diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index f100f340d93f..c28c47463b7e 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -447,11 +447,9 @@ static void dsa_switch_destroy(struct dsa_switch *ds) if (of_phy_is_fixed_link(port_dn)) { phydev = of_phy_find_device(port_dn); if (phydev) { - int addr = phydev->mdio.addr; - phy_device_free(phydev); of_node_put(port_dn); - fixed_phy_del(addr); + fixed_phy_unregister(phydev); } } } -- cgit v1.2.3 From 1c2722a975fdb8c90bc6ba8570b7fb62db4e2e9c Mon Sep 17 00:00:00 2001 From: Gregory CLEMENT Date: Sat, 12 Mar 2016 18:44:17 +0100 Subject: net: mvneta: Fix spinlock usage In the previous patch, the spinlock was not initialized. While it didn't cause any trouble yet it could be a problem to use it uninitialized. The most annoying part was the critical section protected by the spinlock in mvneta_stop(). 
Some of the functions could sleep, as pointed out when CONFIG_DEBUG_ATOMIC_SLEEP is enabled. Actually, in mvneta_stop() we only need to protect the is_stopped flag; indeed, the code of the notifier for CPU online is protected by the same spinlock, so once we get the lock, the notifier work is done. Reported-by: Patrick Uiterwijk Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index be92668faf3e..665eac2c2469 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -3480,17 +3480,17 @@ static int mvneta_stop(struct net_device *dev) struct mvneta_port *pp = netdev_priv(dev); /* Inform that we are stopping so we don't want to setup the - * driver for new CPUs in the notifiers + * driver for new CPUs in the notifiers. The code of the + * notifier for CPU online is protected by the same spinlock, + * so when we get the lock, the notifer work is done. */ spin_lock(&pp->lock); pp->is_stopped = true; + spin_unlock(&pp->lock); + mvneta_stop_dev(pp); mvneta_mdio_remove(pp); unregister_cpu_notifier(&pp->cpu_notifier); - /* Now that the notifier are unregistered, we can release le - * lock - */ - spin_unlock(&pp->lock); on_each_cpu(mvneta_percpu_disable, pp, true); free_percpu_irq(dev->irq, pp->ports); mvneta_cleanup_rxqs(pp); @@ -4023,6 +4023,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->ethtool_ops = &mvneta_eth_tool_ops; pp = netdev_priv(dev); + spin_lock_init(&pp->lock); pp->phy_node = phy_node; pp->phy_interface = phy_mode; -- cgit v1.2.3 From 928b6519afeb2a5e2dc61154380b545ed66c476a Mon Sep 17 00:00:00 2001 From: Dmitri Epshtein Date: Sat, 12 Mar 2016 18:44:18 +0100 Subject: net: mvneta: enable change MAC address when interface is up Function eth_prepare_mac_addr_change() is called as part of MAC address change. This function checks if the interface is running. To enable changing the MAC address while the interface is running, the IFF_LIVE_ADDR_CHANGE flag must be set in the dev->priv_flags field. Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit") Cc: stable@vger.kernel.org Signed-off-by: Dmitri Epshtein Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 665eac2c2469..09a306628807 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4145,7 +4145,7 @@ static int mvneta_probe(struct platform_device *pdev) dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; dev->hw_features |= dev->features; dev->vlan_features |= dev->features; - dev->priv_flags |= IFF_UNICAST_FLT; + dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; err = register_netdev(dev); -- cgit v1.2.3 From 0838abb3c08cff83719b461adffefc83721af34b Mon Sep 17 00:00:00 2001 From: Dmitri Epshtein Date: Sat, 12 Mar 2016 18:44:19 +0100 Subject: net: mvneta: fix error messages in mvneta_port_down function This commit corrects error printing when shutting down the port.
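The problem is a misplaced zero-padding specifier: "0x08%x" prints a literal "08" followed by an unpadded hex value, whereas the intended "0x%08x" zero-pads the value to eight hex digits. A minimal sketch of the difference (hypothetical value, plain userspace C rather than the driver's netdev_warn() calls):

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x1a2b;

	printf("status=0x08%x\n", val);		/* prints "status=0x081a2b" - the "08" is literal text */
	printf("status=0x%08x\n", val);		/* prints "status=0x00001a2b" - value zero-padded to 8 digits */
	return 0;
}

The two hunks below make exactly this substitution for the RX and TX warnings.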
[gregory.clement@free-electrons.com: split initial commit in two individual changes] Signed-off-by: Dmitri Epshtein Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 09a306628807..1af60bdea051 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1107,7 +1107,7 @@ static void mvneta_port_down(struct mvneta_port *pp) do { if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { netdev_warn(pp->dev, - "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n", + "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", val); break; } @@ -1146,7 +1146,7 @@ static void mvneta_port_down(struct mvneta_port *pp) do { if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { netdev_warn(pp->dev, - "TX FIFO empty timeout status=0x08%x\n", + "TX FIFO empty timeout status=0x%08x\n", val); break; } -- cgit v1.2.3 From a3703fb31a4779e5c515f51f602dda1ffcfbfec9 Mon Sep 17 00:00:00 2001 From: Dmitri Epshtein Date: Sat, 12 Mar 2016 18:44:20 +0100 Subject: net: mvneta: replace magic numbers by existing macros Some literal values are actually already defined by macros, so let's use them. [gregory.clement@free-electrons.com: split initial commit in two individual changes] Signed-off-by: Dmitri Epshtein Signed-off-by: Gregory CLEMENT Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/mvneta.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 1af60bdea051..577f7ca7deba 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1114,7 +1114,7 @@ static void mvneta_port_down(struct mvneta_port *pp) mdelay(1); val = mvreg_read(pp, MVNETA_RXQ_CMD); - } while (val & 0xff); + } while (val & MVNETA_RXQ_ENABLE_MASK); /* Stop Tx port activity. Check port Tx activity. Issue stop * command for active channels only @@ -1139,7 +1139,7 @@ static void mvneta_port_down(struct mvneta_port *pp) /* Check TX Command reg that all Txqs are stopped */ val = mvreg_read(pp, MVNETA_TXQ_CMD); - } while (val & 0xff); + } while (val & MVNETA_TXQ_ENABLE_MASK); /* Double check to verify that TX FIFO is empty */ count = 0; -- cgit v1.2.3 From 4fa8c3cc70b2a0aa090bce174c6f2ac148453690 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Sun, 13 Mar 2016 01:29:45 +0300 Subject: sh_eth: kill useless initializers Some of the local variable initializers in the driver turned out to be pointless, kill 'em. Signed-off-by: Sergei Shtylyov Signed-off-by: David S.
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 2e9a78164054..8385d927adc3 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -447,8 +447,8 @@ static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp) static void sh_eth_select_mii(struct net_device *ndev) { - u32 value = 0x0; struct sh_eth_private *mdp = netdev_priv(ndev); + u32 value; switch (mdp->phy_interface) { case PHY_INTERFACE_MODE_GMII: @@ -1231,8 +1231,8 @@ ring_free: static int sh_eth_dev_init(struct net_device *ndev, bool start) { - int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; /* Soft Reset */ ret = sh_eth_reset(ndev); @@ -1355,7 +1355,7 @@ static int sh_eth_txfree(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); struct sh_eth_txdesc *txdesc; int free_num = 0; - int entry = 0; + int entry; for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { entry = mdp->dirty_tx % mdp->num_tx_ring; @@ -1396,10 +1396,10 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; int limit; struct sk_buff *skb; - u16 pkt_len = 0; u32 desc_status; int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; dma_addr_t dma_addr; + u16 pkt_len; u32 buf_len; boguscnt = min(boguscnt, *quota); @@ -1778,7 +1778,7 @@ static int sh_eth_phy_init(struct net_device *ndev) { struct device_node *np = ndev->dev.parent->of_node; struct sh_eth_private *mdp = netdev_priv(ndev); - struct phy_device *phydev = NULL; + struct phy_device *phydev; mdp->link = 0; mdp->speed = 0; @@ -2232,8 +2232,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = { /* network device open function */ static int sh_eth_open(struct net_device *ndev) { - int ret = 0; struct sh_eth_private *mdp = netdev_priv(ndev); + int ret; pm_runtime_get_sync(&mdp->pdev->dev); @@ -2988,12 +2988,12 @@ static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev) static int sh_eth_drv_probe(struct platform_device *pdev) { - int ret, devno = 0; struct resource *res; - struct net_device *ndev = NULL; - struct sh_eth_private *mdp = NULL; struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev); const struct platform_device_id *id = platform_get_device_id(pdev); + struct sh_eth_private *mdp; + struct net_device *ndev; + int ret, devno; /* get base addr */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- cgit v1.2.3 From 464be1e0be687ffbad64014099f7d0c1a5f3723e Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Sun, 13 Mar 2016 00:18:06 +0100 Subject: mISDN: Order IPAC register defines It looks like IPAC/ISAC chips register defines weren't in any particular order. Order them by their number to make it easier to spot holes. Signed-off-by: Maciej S. Szmigiero Acked-by: Karsten Keil Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/mISDN/ipac.h | 40 +++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h index 8121e046b739..76aa6f8f298e 100644 --- a/drivers/isdn/hardware/mISDN/ipac.h +++ b/drivers/isdn/hardware/mISDN/ipac.h @@ -99,32 +99,32 @@ struct ipac_hw { /* All registers original Siemens Spec */ /* IPAC/ISAC registers */ -#define ISAC_MASK 0x20 #define ISAC_ISTA 0x20 -#define ISAC_STAR 0x21 +#define ISAC_MASK 0x20 #define ISAC_CMDR 0x21 +#define ISAC_STAR 0x21 +#define ISAC_MODE 0x22 +#define ISAC_TIMR 0x23 #define ISAC_EXIR 0x24 -#define ISAC_ADF2 0x39 +#define ISAC_RBCL 0x25 +#define ISAC_RSTA 0x27 +#define ISAC_RBCH 0x2A #define ISAC_SPCR 0x30 -#define ISAC_ADF1 0x38 #define ISAC_CIR0 0x31 #define ISAC_CIX0 0x31 -#define ISAC_CIR1 0x33 -#define ISAC_CIX1 0x33 -#define ISAC_STCR 0x37 -#define ISAC_MODE 0x22 -#define ISAC_RSTA 0x27 -#define ISAC_RBCL 0x25 -#define ISAC_RBCH 0x2A -#define ISAC_TIMR 0x23 -#define ISAC_SQXR 0x3b -#define ISAC_SQRR 0x3b -#define ISAC_MOSR 0x3a -#define ISAC_MOCR 0x3a #define ISAC_MOR0 0x32 #define ISAC_MOX0 0x32 +#define ISAC_CIR1 0x33 +#define ISAC_CIX1 0x33 #define ISAC_MOR1 0x34 #define ISAC_MOX1 0x34 +#define ISAC_STCR 0x37 +#define ISAC_ADF1 0x38 +#define ISAC_ADF2 0x39 +#define ISAC_MOCR 0x3a +#define ISAC_MOSR 0x3a +#define ISAC_SQRR 0x3b +#define ISAC_SQXR 0x3b #define ISAC_RBCH_XAC 0x80 @@ -212,13 +212,13 @@ struct ipac_hw { #define ISAC_CMD_DUI 0xF /* ISAC/ISACX/IPAC/IPACX L1 indications */ -#define ISAC_IND_RS 0x1 -#define ISAC_IND_PU 0x7 #define ISAC_IND_DR 0x0 +#define ISAC_IND_RS 0x1 #define ISAC_IND_SD 0x2 #define ISAC_IND_DIS 0x3 -#define ISAC_IND_EI 0x6 #define ISAC_IND_RSY 0x4 +#define ISAC_IND_EI 0x6 +#define ISAC_IND_PU 0x7 #define ISAC_IND_ARD 0x8 #define ISAC_IND_TI 0xA #define ISAC_IND_ATI 0xB @@ -339,9 +339,9 @@ struct ipac_hw { #define ISACX__AUX 0x08 #define ISACX__CIC 0x10 #define ISACX__ST 0x20 +#define IPACX__ON 0x2C #define IPACX__ICB 0x40 #define IPACX__ICA 0x80 -#define IPACX__ON 0x2C /* ISACX/IPACX _CMDRD (W) */ #define ISACX_CMDRD_XRES 0x01 -- cgit v1.2.3 From 1e1589ad8b5cb5b8a6781ba5850cf710ada0e919 Mon Sep 17 00:00:00 2001 From: "Maciej S. Szmigiero" Date: Sun, 13 Mar 2016 00:19:07 +0100 Subject: mISDN: Support DR6 indication in mISDNipac driver According to figure 39 in PEB3086 data sheet, version 1.4 this indication replaces DR when layer 1 transition source state is F6. This fixes mISDN layer 1 getting stuck in F6 state in TE mode on Dialogic Diva 2.02 card (and possibly others) when NT deactivates it. Signed-off-by: Maciej S. Szmigiero Acked-by: Karsten Keil Signed-off-by: David S. 
Miller --- drivers/isdn/hardware/mISDN/ipac.h | 1 + drivers/isdn/hardware/mISDN/mISDNipac.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/isdn/hardware/mISDN/ipac.h b/drivers/isdn/hardware/mISDN/ipac.h index 76aa6f8f298e..720ee72aab6a 100644 --- a/drivers/isdn/hardware/mISDN/ipac.h +++ b/drivers/isdn/hardware/mISDN/ipac.h @@ -217,6 +217,7 @@ struct ipac_hw { #define ISAC_IND_SD 0x2 #define ISAC_IND_DIS 0x3 #define ISAC_IND_RSY 0x4 +#define ISAC_IND_DR6 0x5 #define ISAC_IND_EI 0x6 #define ISAC_IND_PU 0x7 #define ISAC_IND_ARD 0x8 diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index cb428b9ee441..aa9b6c3cadc1 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -80,6 +80,7 @@ isac_ph_state_bh(struct dchannel *dch) l1_event(dch->l1, HW_DEACT_CNF); break; case ISAC_IND_DR: + case ISAC_IND_DR6: dch->state = 3; l1_event(dch->l1, HW_DEACT_IND); break; @@ -660,6 +661,7 @@ isac_l1cmd(struct dchannel *dch, u32 cmd) spin_lock_irqsave(isac->hwlock, flags); if ((isac->state == ISAC_IND_EI) || (isac->state == ISAC_IND_DR) || + (isac->state == ISAC_IND_DR6) || (isac->state == ISAC_IND_RS)) ph_command(isac, ISAC_CMD_TIM); else -- cgit v1.2.3 From 71327a4e7d997276d49db92fd3d30008389ee6d5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 13 Mar 2016 16:21:32 -0400 Subject: net: dsa: rename port_*_bridge routines Rename DSA port_join_bridge and port_leave_bridge routines to respectively port_bridge_join and port_bridge_leave in order to respect an implicit Port::Bridge namespace. Signed-off-by: Vivien Didelot Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- Documentation/networking/dsa/dsa.txt | 4 ++-- drivers/net/dsa/bcm_sf2.c | 4 ++-- drivers/net/dsa/mv88e6171.c | 4 ++-- drivers/net/dsa/mv88e6352.c | 4 ++-- include/net/dsa.h | 4 ++-- net/dsa/slave.c | 8 ++++---- 6 files changed, 14 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 974e9c387d1e..3b196c304b73 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt @@ -521,12 +521,12 @@ See Documentation/hwmon/sysfs-interface for details. Bridge layer ------------ -- port_join_bridge: bridge layer function invoked when a given switch port is +- port_bridge_join: bridge layer function invoked when a given switch port is added to a bridge, this function should be doing the necessary at the switch level to permit the joining port from being added to the relevant logical domain for it to ingress/egress traffic with other members of the bridge. -- port_leave_bridge: bridge layer function invoked when a given switch port is +- port_bridge_leave: bridge layer function invoked when a given switch port is removed from a bridge, this function should be doing the necessary at the switch level to deny the leaving port from ingress/egress traffic from the remaining bridge members. 
When the port leaves the bridge, it should be aged diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 3f627598f277..4bcc9ebf5e06 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1387,8 +1387,8 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = { .port_disable = bcm_sf2_port_disable, .get_eee = bcm_sf2_sw_get_eee, .set_eee = bcm_sf2_sw_set_eee, - .port_join_bridge = bcm_sf2_sw_br_join, - .port_leave_bridge = bcm_sf2_sw_br_leave, + .port_bridge_join = bcm_sf2_sw_br_join, + .port_bridge_leave = bcm_sf2_sw_br_leave, .port_stp_update = bcm_sf2_sw_br_set_stp_state, .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, .port_fdb_add = bcm_sf2_sw_fdb_add, diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c index d72ccbdf53ec..c0164b98fc08 100644 --- a/drivers/net/dsa/mv88e6171.c +++ b/drivers/net/dsa/mv88e6171.c @@ -103,8 +103,8 @@ struct dsa_switch_driver mv88e6171_switch_driver = { #endif .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, - .port_join_bridge = mv88e6xxx_port_bridge_join, - .port_leave_bridge = mv88e6xxx_port_bridge_leave, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, .port_stp_update = mv88e6xxx_port_stp_update, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c index a41fa5043d77..5f528abc8af1 100644 --- a/drivers/net/dsa/mv88e6352.c +++ b/drivers/net/dsa/mv88e6352.c @@ -324,8 +324,8 @@ struct dsa_switch_driver mv88e6352_switch_driver = { .set_eeprom = mv88e6352_set_eeprom, .get_regs_len = mv88e6xxx_get_regs_len, .get_regs = mv88e6xxx_get_regs, - .port_join_bridge = mv88e6xxx_port_bridge_join, - .port_leave_bridge = mv88e6xxx_port_bridge_leave, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, .port_stp_update = mv88e6xxx_port_stp_update, .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, diff --git a/include/net/dsa.h b/include/net/dsa.h index 26c0a3fa009a..004e034184c1 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -296,9 +296,9 @@ struct dsa_switch_driver { /* * Bridge integration */ - int (*port_join_bridge)(struct dsa_switch *ds, int port, + int (*port_bridge_join)(struct dsa_switch *ds, int port, struct net_device *bridge); - int (*port_leave_bridge)(struct dsa_switch *ds, int port); + int (*port_bridge_leave)(struct dsa_switch *ds, int port); int (*port_stp_update)(struct dsa_switch *ds, int port, u8 state); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 49056d90b179..52653d715f64 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -448,8 +448,8 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, p->bridge_dev = br; - if (ds->drv->port_join_bridge) - ret = ds->drv->port_join_bridge(ds, p->port, br); + if (ds->drv->port_bridge_join) + ret = ds->drv->port_bridge_join(ds, p->port, br); return ret; } @@ -461,8 +461,8 @@ static int dsa_slave_bridge_port_leave(struct net_device *dev) int ret = -EOPNOTSUPP; - if (ds->drv->port_leave_bridge) - ret = ds->drv->port_leave_bridge(ds, p->port); + if (ds->drv->port_bridge_leave) + ret = ds->drv->port_bridge_leave(ds, p->port); p->bridge_dev = NULL; -- cgit v1.2.3 From 16bfa7024eba5e36aff38ba62086b9027373007d Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 13 Mar 2016 16:21:33 -0400 Subject: net: dsa: make port_bridge_leave return 
void netdev_upper_dev_unlink() which notifies NETDEV_CHANGEUPPER, returns void, as well as del_nbp(). So there's no advantage to catch an eventual error from the port_bridge_leave routine at the DSA level. Make this routine void for the DSA layer and its existing drivers. Signed-off-by: Vivien Didelot Acked-by: Jiri Pirko Signed-off-by: David S. Miller --- drivers/net/dsa/bcm_sf2.c | 4 +--- drivers/net/dsa/mv88e6xxx.c | 28 +++++++++------------------- drivers/net/dsa/mv88e6xxx.h | 2 +- include/net/dsa.h | 2 +- net/dsa/slave.c | 9 +++------ 5 files changed, 15 insertions(+), 30 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 4bcc9ebf5e06..95944d5e3e22 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -516,7 +516,7 @@ static int bcm_sf2_sw_br_join(struct dsa_switch *ds, int port, return 0; } -static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) +static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) { struct bcm_sf2_priv *priv = ds_to_priv(ds); struct net_device *bridge = priv->port_sts[port].bridge_dev; @@ -543,8 +543,6 @@ static int bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port) core_writel(priv, p_ctl, CORE_PORT_VLAN_CTL_PORT(port)); priv->port_sts[port].vlan_ctl_mask = p_ctl; priv->port_sts[port].bridge_dev = NULL; - - return 0; } static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 5309c738ff00..fa086e09d6b7 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -2219,39 +2219,29 @@ unlock: return err; } -int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) +void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port) { struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); struct net_device *bridge = ps->ports[port].bridge_dev; u16 fid; - int i, err; + int i; mutex_lock(&ps->smi_mutex); /* Give the port a fresh Filtering Information Database */ - err = _mv88e6xxx_fid_new(ds, &fid); - if (err) - goto unlock; - - err = _mv88e6xxx_port_fid_set(ds, port, fid); - if (err) - goto unlock; + if (_mv88e6xxx_fid_new(ds, &fid) || + _mv88e6xxx_port_fid_set(ds, port, fid)) + netdev_warn(ds->ports[port], "failed to assign a new FID\n"); /* Unassign the bridge and remap each port's VLANTable */ ps->ports[port].bridge_dev = NULL; - for (i = 0; i < ps->num_ports; ++i) { - if (i == port || ps->ports[i].bridge_dev == bridge) { - err = _mv88e6xxx_port_based_vlan_map(ds, i); - if (err) - break; - } - } + for (i = 0; i < ps->num_ports; ++i) + if (i == port || ps->ports[i].bridge_dev == bridge) + if (_mv88e6xxx_port_based_vlan_map(ds, i)) + netdev_warn(ds->ports[i], "failed to remap\n"); -unlock: mutex_unlock(&ps->smi_mutex); - - return err; } static void mv88e6xxx_bridge_work(struct work_struct *work) diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h index 281cefe86afd..9a038aba48fb 100644 --- a/drivers/net/dsa/mv88e6xxx.h +++ b/drivers/net/dsa/mv88e6xxx.h @@ -488,7 +488,7 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port, struct phy_device *phydev, struct ethtool_eee *e); int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *bridge); -int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port); +void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port); int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state); int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port, bool 
vlan_filtering); diff --git a/include/net/dsa.h b/include/net/dsa.h index 004e034184c1..6463bb2863ac 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -298,7 +298,7 @@ struct dsa_switch_driver { */ int (*port_bridge_join)(struct dsa_switch *ds, int port, struct net_device *bridge); - int (*port_bridge_leave)(struct dsa_switch *ds, int port); + void (*port_bridge_leave)(struct dsa_switch *ds, int port); int (*port_stp_update)(struct dsa_switch *ds, int port, u8 state); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 52653d715f64..8e00f1d83eb8 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -454,15 +454,14 @@ static int dsa_slave_bridge_port_join(struct net_device *dev, return ret; } -static int dsa_slave_bridge_port_leave(struct net_device *dev) +static void dsa_slave_bridge_port_leave(struct net_device *dev) { struct dsa_slave_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->parent; - int ret = -EOPNOTSUPP; if (ds->drv->port_bridge_leave) - ret = ds->drv->port_bridge_leave(ds, p->port); + ds->drv->port_bridge_leave(ds, p->port); p->bridge_dev = NULL; @@ -470,8 +469,6 @@ static int dsa_slave_bridge_port_leave(struct net_device *dev) * so allow it to be in BR_STATE_FORWARDING to be kept functional */ dsa_slave_stp_update(dev, BR_STATE_FORWARDING); - - return ret; } static int dsa_slave_port_attr_get(struct net_device *dev, @@ -1156,7 +1153,7 @@ static int dsa_slave_master_changed(struct net_device *dev) !strcmp(master->rtnl_link_ops->kind, "bridge")) err = dsa_slave_bridge_port_join(dev, master); else if (dsa_port_is_bridged(p)) - err = dsa_slave_bridge_port_leave(dev); + dsa_slave_bridge_port_leave(dev); return err; } -- cgit v1.2.3 From e3ebd894f084255fde19116955ba7054858ff5d6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 14 Mar 2016 23:45:12 +0100 Subject: smc91x: avoid self-comparison warning The smc91x driver defines a macro that compares its argument to itself, apparently to get a true result while using its argument to avoid a warning about unused local variables. Unfortunately, this triggers a warning with gcc-6, as the comparison is obviously useless: drivers/net/ethernet/smsc/smc91x.c: In function 'smc_hardware_send_pkt': drivers/net/ethernet/smsc/smc91x.c:563:14: error: self-comparison always evaluates to true [-Werror=tautological-compare] if (!smc_special_trylock(&lp->lock, flags)) { This replaces the macro with another one that behaves similarly, with a cast to (void) to ensure the argument is used, and using a literal 'true' as its value. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
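For readers following the smc91x change described above, a minimal standalone illustration of the macro rewrite is given below. The names trylock_old()/trylock_new() and the surrounding program are illustrative only, not driver code: the comma expression evaluates its first operand purely to mark the argument as used, then yields a literal true, so gcc-6 no longer sees a self-comparison.

/* Standalone sketch of the macro rewrite discussed above; trylock_old()
 * and trylock_new() are illustrative names, not the driver's macros.
 */
#include <stdbool.h>
#include <stdio.h>

/* old form: "uses" flags by comparing it with itself, which gcc-6
 * reports as a tautological comparison
 */
#define trylock_old(lock, flags)	((flags) == (flags))

/* new form: evaluate flags via the comma operator, then yield a
 * literal true; no comparison, no warning
 */
#define trylock_new(lock, flags)	((void)(flags), true)

int main(void)
{
	unsigned long flags = 0;
	int lock = 0;

	if (trylock_new(&lock, flags))
		printf("trylock succeeded, flags = %lu\n", flags);
	return 0;
}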
Miller --- drivers/net/ethernet/smsc/smc91x.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index db7db8ac4ca3..c5ed27c54724 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -540,7 +540,7 @@ static inline void smc_rcv(struct net_device *dev) #define smc_special_lock(lock, flags) spin_lock_irqsave(lock, flags) #define smc_special_unlock(lock, flags) spin_unlock_irqrestore(lock, flags) #else -#define smc_special_trylock(lock, flags) (flags == flags) +#define smc_special_trylock(lock, flags) ((void)flags, true) #define smc_special_lock(lock, flags) do { flags = 0; } while (0) #define smc_special_unlock(lock, flags) do { flags = 0; } while (0) #endif -- cgit v1.2.3 From d78a1f08458f8118cf42154e55a3006cc17502bc Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Sun, 13 Mar 2016 21:21:48 +0200 Subject: bnx2x: don't wait for Tx completion on recovery When driver has hit a parity event, HW can no longer write to host memory. As a result, Tx completions cannot be written to the host SB memory, and waiting for Tx completions eventually timeout. As driver is willing to delay as much as 1-2 seconds per Tx queue for its draining and this delay is sequential, the time to recover might greatly lengthen needlessly in case the recovery is done under multi-connection traffic. Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index a949783c8fc3..0a9108cd4c45 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3042,8 +3042,12 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link) bnx2x_save_statistics(bp); } - /* wait till consumers catch up with producers in all queues */ - bnx2x_drain_tx_queues(bp); + /* wait till consumers catch up with producers in all queues. + * If we're recovering, FW can't write to host so no reason + * to wait for the queues to complete all Tx. + */ + if (unload_mode != UNLOAD_RECOVERY) + bnx2x_drain_tx_queues(bp); /* if VF indicate to PF this function is going down (PF will delete sp * elements and clear initializations -- cgit v1.2.3 From ad846aa52ca7b7ec6b99a8b13c4ce0bbe7b15b7f Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Mon, 14 Mar 2016 01:09:53 +0300 Subject: sh_eth: do not call netif_start_queue() from sh_eth_dev_init() Iff sh_eth_phy_start() call fails in sh_eth_open(), the netif_start_queue() call done by sh_eth_dev_init() is not undone. In order to deal with that, stop calling netif_start_queue() from there, so that it can be called only when the device is fully opened and sh_eth_dev_init() only deals with the hardware initialization, symmetrically to sh_eth_dev_exit()... Signed-off-by: Sergei Shtylyov Signed-off-by: David S. 
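The sh_eth reordering described above can be summarized in one place. The sketch below is a simplified view of the intended open() sequence, not the literal sh_eth code; the example_* helpers stand in for the driver's IRQ request, hardware init and PHY start steps, and only netif_start_queue() is a real kernel call.

/* Simplified sketch of the intended ndo_open ordering; example_* helpers
 * are placeholders. The TX queue is started last, once nothing can fail
 * anymore, so an error in PHY bring-up leaves no queue state to undo.
 */
static int example_open(struct net_device *ndev)
{
	int ret;

	ret = example_request_irq(ndev);
	if (ret)
		return ret;

	ret = example_dev_init(ndev);		/* hardware init only */
	if (ret)
		goto out_free_irq;

	ret = example_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	netif_start_queue(ndev);		/* done only on full success */
	return 0;

out_free_irq:
	example_free_irq(ndev);
	return ret;
}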
Miller --- drivers/net/ethernet/renesas/sh_eth.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 8385d927adc3..004e2d7560fd 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1312,8 +1312,6 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) if (start) { /* Setting the Rx mode will start the Rx process. */ sh_eth_write(ndev, EDRRR_R, EDRRR); - - netif_start_queue(ndev); } return ret; @@ -2261,6 +2259,8 @@ static int sh_eth_open(struct net_device *ndev) if (ret) goto out_free_irq; + netif_start_queue(ndev); + mdp->is_opened = 1; return ret; @@ -2304,6 +2304,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev) /* device init */ sh_eth_dev_init(ndev, true); + + netif_start_queue(ndev); } /* Packet transmit function */ -- cgit v1.2.3 From 1bddd96cba03da0a14b3e5144e98c9a6ff17e983 Mon Sep 17 00:00:00 2001 From: Caesar Wang Date: Mon, 14 Mar 2016 16:01:54 +0800 Subject: net: arc_emac: support the phy reset for emac driver This patch adds to support the emac phy reset. Different boards may require different phy reset duration. Add property phy-reset-duration for emac driver, so that the boards that need a longer reset duration can specify it in their device tree. Signed-off-by: Heiko Stuebner Signed-off-by: Caesar Wang Cc: "David S. Miller" Cc: netdev@vger.kernel.org Cc: Alexander Kochetkov Cc: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/arc/emac.h | 6 ++++++ drivers/net/ethernet/arc/emac_mdio.c | 37 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index dae1ac300a49..1a4040397a4b 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -102,6 +102,11 @@ struct buffer_state { DEFINE_DMA_UNMAP_LEN(len); }; +struct arc_emac_mdio_bus_data { + struct gpio_desc *reset_gpio; + int msec; +}; + /** * struct arc_emac_priv - Storage of EMAC's private information. * @dev: Pointer to the current device. @@ -131,6 +136,7 @@ struct arc_emac_priv { struct device *dev; struct phy_device *phy_dev; struct mii_bus *bus; + struct arc_emac_mdio_bus_data bus_data; void __iomem *regs; struct clk *clk; diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index d5ee986936da..caf704264eba 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "emac.h" @@ -98,6 +99,25 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, return arc_mdio_complete_wait(priv); } +/** + * arc_mdio_reset + * @bus: points to the mii_bus structure + * Description: reset the MII bus + */ +int arc_mdio_reset(struct mii_bus *bus) +{ + struct arc_emac_priv *priv = bus->priv; + struct arc_emac_mdio_bus_data *data = &priv->bus_data; + + if (data->reset_gpio) { + gpiod_set_value_cansleep(data->reset_gpio, 1); + msleep(data->msec); + gpiod_set_value_cansleep(data->reset_gpio, 0); + } + + return 0; +} + /** * arc_mdio_probe - MDIO probe function. * @priv: Pointer to ARC EMAC private data structure. 
@@ -109,6 +129,8 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, */ int arc_mdio_probe(struct arc_emac_priv *priv) { + struct arc_emac_mdio_bus_data *data = &priv->bus_data; + struct device_node *np = priv->dev->of_node; struct mii_bus *bus; int error; @@ -122,6 +144,21 @@ int arc_mdio_probe(struct arc_emac_priv *priv) bus->name = "Synopsys MII Bus", bus->read = &arc_mdio_read; bus->write = &arc_mdio_write; + bus->reset = &arc_mdio_reset; + + /* optional reset-related properties */ + data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset", + GPIOD_OUT_LOW); + if (IS_ERR(data->reset_gpio)) { + error = PTR_ERR(data->reset_gpio); + dev_err(priv->dev, "Failed to request gpio: %d\n", error); + return error; + } + + of_property_read_u32(np, "phy-reset-duration", &data->msec); + /* A sane reset duration should not be longer than 1s */ + if (data->msec > 1000) + data->msec = 1; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", bus->name); -- cgit v1.2.3 From 663713eb477b9b916426124d6f5d5c5e9db919cf Mon Sep 17 00:00:00 2001 From: Caesar Wang Date: Mon, 14 Mar 2016 16:01:55 +0800 Subject: net: arc: trivial: cleanup the emac driver This patch will make the driver more readability The emac has the error and warnings if you run 'scripts/checkpatch.pl -f --subjective xxx' to check. Let's clean up such trivial details. Signed-off-by: Caesar Wang Cc: Jiri Kosina Cc: "David S. Miller" Cc: Alexander Kochetkov Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/arc/emac.h | 54 +++++++++++++++++--------------- drivers/net/ethernet/arc/emac_main.c | 35 ++++++++++----------- drivers/net/ethernet/arc/emac_mdio.c | 2 +- drivers/net/ethernet/arc/emac_rockchip.c | 41 +++++++++++++++++------- 4 files changed, 75 insertions(+), 57 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h index 1a4040397a4b..ca562bc034c3 100644 --- a/drivers/net/ethernet/arc/emac.h +++ b/drivers/net/ethernet/arc/emac.h @@ -14,36 +14,36 @@ #include /* STATUS and ENABLE Register bit masks */ -#define TXINT_MASK (1<<0) /* Transmit interrupt */ -#define RXINT_MASK (1<<1) /* Receive interrupt */ -#define ERR_MASK (1<<2) /* Error interrupt */ -#define TXCH_MASK (1<<3) /* Transmit chaining error interrupt */ -#define MSER_MASK (1<<4) /* Missed packet counter error */ -#define RXCR_MASK (1<<8) /* RXCRCERR counter rolled over */ -#define RXFR_MASK (1<<9) /* RXFRAMEERR counter rolled over */ -#define RXFL_MASK (1<<10) /* RXOFLOWERR counter rolled over */ -#define MDIO_MASK (1<<12) /* MDIO complete interrupt */ -#define TXPL_MASK (1<<31) /* Force polling of BD by EMAC */ +#define TXINT_MASK (1 << 0) /* Transmit interrupt */ +#define RXINT_MASK (1 << 1) /* Receive interrupt */ +#define ERR_MASK (1 << 2) /* Error interrupt */ +#define TXCH_MASK (1 << 3) /* Transmit chaining error interrupt */ +#define MSER_MASK (1 << 4) /* Missed packet counter error */ +#define RXCR_MASK (1 << 8) /* RXCRCERR counter rolled over */ +#define RXFR_MASK (1 << 9) /* RXFRAMEERR counter rolled over */ +#define RXFL_MASK (1 << 10) /* RXOFLOWERR counter rolled over */ +#define MDIO_MASK (1 << 12) /* MDIO complete interrupt */ +#define TXPL_MASK (1 << 31) /* Force polling of BD by EMAC */ /* CONTROL Register bit masks */ -#define EN_MASK (1<<0) /* VMAC enable */ -#define TXRN_MASK (1<<3) /* TX enable */ -#define RXRN_MASK (1<<4) /* RX enable */ -#define DSBC_MASK (1<<8) /* Disable receive broadcast */ -#define ENFL_MASK (1<<10) /* Enable Full-duplex */ -#define 
PROM_MASK (1<<11) /* Promiscuous mode */ +#define EN_MASK (1 << 0) /* VMAC enable */ +#define TXRN_MASK (1 << 3) /* TX enable */ +#define RXRN_MASK (1 << 4) /* RX enable */ +#define DSBC_MASK (1 << 8) /* Disable receive broadcast */ +#define ENFL_MASK (1 << 10) /* Enable Full-duplex */ +#define PROM_MASK (1 << 11) /* Promiscuous mode */ /* Buffer descriptor INFO bit masks */ -#define OWN_MASK (1<<31) /* 0-CPU owns buffer, 1-EMAC owns buffer */ -#define FIRST_MASK (1<<16) /* First buffer in chain */ -#define LAST_MASK (1<<17) /* Last buffer in chain */ +#define OWN_MASK (1 << 31) /* 0-CPU or 1-EMAC owns buffer */ +#define FIRST_MASK (1 << 16) /* First buffer in chain */ +#define LAST_MASK (1 << 17) /* Last buffer in chain */ #define LEN_MASK 0x000007FF /* last 11 bits */ -#define CRLS (1<<21) -#define DEFR (1<<22) -#define DROP (1<<23) -#define RTRY (1<<24) -#define LTCL (1<<28) -#define UFLO (1<<29) +#define CRLS (1 << 21) +#define DEFR (1 << 22) +#define DROP (1 << 23) +#define RTRY (1 << 24) +#define LTCL (1 << 28) +#define UFLO (1 << 29) #define FOR_EMAC OWN_MASK #define FOR_CPU 0 @@ -66,7 +66,7 @@ enum { R_MDIO, }; -#define TX_TIMEOUT (400*HZ/1000) /* Transmission timeout */ +#define TX_TIMEOUT (400 * HZ / 1000) /* Transmission timeout */ #define ARC_EMAC_NAPI_WEIGHT 40 /* Workload for NAPI */ @@ -196,6 +196,7 @@ static inline unsigned int arc_reg_get(struct arc_emac_priv *priv, int reg) static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) { unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value | mask); } @@ -211,6 +212,7 @@ static inline void arc_reg_or(struct arc_emac_priv *priv, int reg, int mask) static inline void arc_reg_clr(struct arc_emac_priv *priv, int reg, int mask) { unsigned int value = arc_reg_get(priv, reg); + arc_reg_set(priv, reg, value & ~mask); } diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 6446af1403f7..a3a9392a4954 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -26,7 +26,6 @@ #include "emac.h" - /** * arc_emac_tx_avail - Return the number of available slots in the tx ring. * @priv: Pointer to ARC EMAC private data structure. 
@@ -66,7 +65,7 @@ static void arc_emac_adjust_link(struct net_device *ndev) if (priv->duplex != phy_dev->duplex) { reg = arc_reg_get(priv, R_CTRL); - if (DUPLEX_FULL == phy_dev->duplex) + if (phy_dev->duplex == DUPLEX_FULL) reg |= ENFL_MASK; else reg &= ~ENFL_MASK; @@ -466,9 +465,9 @@ static int arc_emac_open(struct net_device *ndev) /* Set CONTROL */ arc_reg_set(priv, R_CTRL, - (RX_BD_NUM << 24) | /* RX BD table length */ - (TX_BD_NUM << 16) | /* TX BD table length */ - TXRN_MASK | RXRN_MASK); + (RX_BD_NUM << 24) | /* RX BD table length */ + (TX_BD_NUM << 16) | /* TX BD table length */ + TXRN_MASK | RXRN_MASK); napi_enable(&priv->napi); @@ -533,8 +532,10 @@ static void arc_free_tx_queue(struct net_device *ndev) struct buffer_state *tx_buff = &priv->tx_buff[i]; if (tx_buff->skb) { - dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr), - dma_unmap_len(tx_buff, len), DMA_TO_DEVICE); + dma_unmap_single(&ndev->dev, + dma_unmap_addr(tx_buff, addr), + dma_unmap_len(tx_buff, len), + DMA_TO_DEVICE); /* return the sk_buff to system */ dev_kfree_skb_irq(tx_buff->skb); @@ -562,8 +563,10 @@ static void arc_free_rx_queue(struct net_device *ndev) struct buffer_state *rx_buff = &priv->rx_buff[i]; if (rx_buff->skb) { - dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr), - dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, + dma_unmap_addr(rx_buff, addr), + dma_unmap_len(rx_buff, len), + DMA_FROM_DEVICE); /* return the sk_buff to system */ dev_kfree_skb_irq(rx_buff->skb); @@ -717,8 +720,8 @@ static void arc_emac_set_address_internal(struct net_device *ndev) struct arc_emac_priv *priv = netdev_priv(ndev); unsigned int addr_low, addr_hi; - addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]); - addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]); + addr_low = le32_to_cpu(*(__le32 *)&ndev->dev_addr[0]); + addr_hi = le16_to_cpu(*(__le16 *)&ndev->dev_addr[4]); arc_reg_set(priv, R_ADDRL, addr_low); arc_reg_set(priv, R_ADDRH, addr_hi); @@ -774,7 +777,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) unsigned int id, clock_frequency, irq; int err; - /* Get PHY from device tree */ phy_node = of_parse_phandle(dev->of_node, "phy", 0); if (!phy_node) { @@ -796,7 +798,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) return -ENODEV; } - ndev->netdev_ops = &arc_emac_netdev_ops; ndev->ethtool_ops = &arc_emac_ethtool_ops; ndev->watchdog_timeo = TX_TIMEOUT; @@ -807,9 +808,9 @@ int arc_emac_probe(struct net_device *ndev, int interface) priv->dev = dev; priv->regs = devm_ioremap_resource(dev, &res_regs); - if (IS_ERR(priv->regs)) { + if (IS_ERR(priv->regs)) return PTR_ERR(priv->regs); - } + dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs); if (priv->clk) { @@ -930,10 +931,8 @@ int arc_emac_remove(struct net_device *ndev) unregister_netdev(ndev); netif_napi_del(&priv->napi); - if (!IS_ERR(priv->clk)) { + if (!IS_ERR(priv->clk)) clk_disable_unprepare(priv->clk); - } - return 0; } diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c index caf704264eba..16419f550eff 100644 --- a/drivers/net/ethernet/arc/emac_mdio.c +++ b/drivers/net/ethernet/arc/emac_mdio.c @@ -94,7 +94,7 @@ static int arc_mdio_write(struct mii_bus *bus, int phy_addr, phy_addr, reg_num, value); arc_reg_set(priv, R_MDIO, - 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value); + 0x50020000 | (phy_addr << 23) | (reg_num << 18) | value); return arc_mdio_complete_wait(priv); } diff --git a/drivers/net/ethernet/arc/emac_rockchip.c 
b/drivers/net/ethernet/arc/emac_rockchip.c index 85e821ccfcd2..e278e3d96ee0 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -50,7 +50,7 @@ static void emac_rockchip_set_mac_speed(void *priv, unsigned int speed) u32 data; int err = 0; - switch(speed) { + switch (speed) { case 10: data = (1 << (speed_offset + 16)) | (0 << speed_offset); break; @@ -83,9 +83,18 @@ static const struct emac_rockchip_soc_data emac_rk3188_emac_data = { }; static const struct of_device_id emac_rockchip_dt_ids[] = { - { .compatible = "rockchip,rk3036-emac", .data = &emac_rk3036_emac_data }, - { .compatible = "rockchip,rk3066-emac", .data = &emac_rk3066_emac_data }, - { .compatible = "rockchip,rk3188-emac", .data = &emac_rk3188_emac_data }, + { + .compatible = "rockchip,rk3036-emac", + .data = &emac_rk3036_emac_data, + }, + { + .compatible = "rockchip,rk3066-emac", + .data = &emac_rk3066_emac_data, + }, + { + .compatible = "rockchip,rk3188-emac", + .data = &emac_rk3188_emac_data, + }, { /* Sentinel */ } }; @@ -123,9 +132,11 @@ static int emac_rockchip_probe(struct platform_device *pdev) goto out_netdev; } - priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); + priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node, + "rockchip,grf"); if (IS_ERR(priv->grf)) { - dev_err(dev, "failed to retrieve global register file (%ld)\n", PTR_ERR(priv->grf)); + dev_err(dev, "failed to retrieve global register file (%ld)\n", + PTR_ERR(priv->grf)); err = PTR_ERR(priv->grf); goto out_netdev; } @@ -135,14 +146,16 @@ static int emac_rockchip_probe(struct platform_device *pdev) priv->emac.clk = devm_clk_get(dev, "hclk"); if (IS_ERR(priv->emac.clk)) { - dev_err(dev, "failed to retrieve host clock (%ld)\n", PTR_ERR(priv->emac.clk)); + dev_err(dev, "failed to retrieve host clock (%ld)\n", + PTR_ERR(priv->emac.clk)); err = PTR_ERR(priv->emac.clk); goto out_netdev; } priv->refclk = devm_clk_get(dev, "macref"); if (IS_ERR(priv->refclk)) { - dev_err(dev, "failed to retrieve reference clock (%ld)\n", PTR_ERR(priv->refclk)); + dev_err(dev, "failed to retrieve reference clock (%ld)\n", + PTR_ERR(priv->refclk)); err = PTR_ERR(priv->refclk); goto out_netdev; } @@ -179,19 +192,22 @@ static int emac_rockchip_probe(struct platform_device *pdev) err = regmap_write(priv->grf, priv->soc_data->grf_offset, data); if (err) { - dev_err(dev, "unable to apply initial settings to grf (%d)\n", err); + dev_err(dev, "unable to apply initial settings to grf (%d)\n", + err); goto out_regulator_disable; } /* RMII interface needs always a rate of 50MHz */ err = clk_set_rate(priv->refclk, 50000000); if (err) - dev_err(dev, "failed to change reference clock rate (%d)\n", err); + dev_err(dev, + "failed to change reference clock rate (%d)\n", err); if (priv->soc_data->need_div_macclk) { priv->macclk = devm_clk_get(dev, "macclk"); if (IS_ERR(priv->macclk)) { - dev_err(dev, "failed to retrieve mac clock (%ld)\n", PTR_ERR(priv->macclk)); + dev_err(dev, "failed to retrieve mac clock (%ld)\n", + PTR_ERR(priv->macclk)); err = PTR_ERR(priv->macclk); goto out_regulator_disable; } @@ -205,7 +221,8 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* RMII TX/RX needs always a rate of 25MHz */ err = clk_set_rate(priv->macclk, 25000000); if (err) - dev_err(dev, "failed to change mac clock rate (%d)\n", err); + dev_err(dev, + "failed to change mac clock rate (%d)\n", err); } err = arc_emac_probe(ndev, interface); -- cgit v1.2.3 From e764b93924b47cd53b818c1cf8708a35bdfbb83d Mon Sep 17 00:00:00 
2001 From: Xing Zheng Date: Mon, 14 Mar 2016 16:01:57 +0800 Subject: clk: rockchip: associate the rk3036 HCLK_EMAC clock-id Associate the new clock id the clock. Signed-off-by: Xing Zheng Signed-off-by: Caesar Wang Cc: Xing Zheng Cc: Michael Turquette Cc: Heiko Stuebner Cc: Stephen Boyd Cc: linux-clk@vger.kernel.org Cc: linux-rockchip@lists.infradead.org Signed-off-by: David S. Miller --- drivers/clk/rockchip/clk-rk3036.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index bc7fbac83ab7..37f42929598d 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -404,7 +404,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { GATE(HCLK_OTG1, "hclk_otg1", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(7), 3, GFLAGS), GATE(HCLK_I2S, "hclk_i2s", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS), GATE(0, "hclk_sfc", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 14, GFLAGS), - GATE(0, "hclk_mac", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(3), 15, GFLAGS), + GATE(HCLK_MAC, "hclk_mac", "hclk_peri", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS), /* pclk_peri gates */ GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS), -- cgit v1.2.3 From 2c6fae2501d87ca94b5249df38797f02d4e39add Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Mon, 14 Mar 2016 16:01:59 +0800 Subject: clk: rockchip: associate SCLK_MAC_PLL and disable reparenting on rk3036 The emac needs constant and very specific rate but the possible PLL-sources are very limited, so we expect the PLL source to be set manually on per board and don't want it to get changed in an automatic way later. So add the necessary clock-id and disable reparenting on set_rate calls. Signed-off-by: Heiko Stuebner Cc: Michael Turquette Cc: Heiko Stuebner Cc: Stephen Boyd Cc: linux-clk@vger.kernel.org Signed-off-by: Caesar Wang Signed-off-by: David S. Miller --- drivers/clk/rockchip/clk-rk3036.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index 37f42929598d..53e9c39f5103 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c @@ -343,7 +343,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = { RK2928_CLKSEL_CON(16), 0, 2, MFLAGS, 2, 5, DFLAGS, RK2928_CLKGATE_CON(10), 5, GFLAGS), - COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, + COMPOSITE_NOGATE(SCLK_MACPLL, "mac_pll_src", mux_pll_src_3plls_p, CLK_SET_RATE_NO_REPARENT, RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS), MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), -- cgit v1.2.3 From e8e56ffd9d2973398b60ece1f1bebb8d67b4d032 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 14 Mar 2016 21:17:16 +0100 Subject: ppp: ensure file->private_data can't be overridden Locking ppp_mutex must be done before dereferencing file->private_data, otherwise it could be modified before ppp_unattached_ioctl() takes the lock. This could lead ppp_unattached_ioctl() to override ->private_data, thus leaking reference to the ppp_file previously pointed to. v2: lock all ppp_ioctl() instead of just checking private_data in ppp_unattached_ioctl(), to avoid ambiguous behaviour. 
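A condensed sketch of the locking order this change establishes is shown below. It is simplified: example_attached_ioctl() is a hypothetical stand-in for the attached channel/interface handling, and error paths such as PPPIOCDETACH are omitted. The point is that file->private_data is only read while ppp_mutex is held, so a concurrent attach cannot install a ppp_file between the NULL check and the unattached path.

/* Sketch only: ppp_mutex is taken before file->private_data is read,
 * closing the window in which another thread's PPPIOCNEWUNIT/ATTACH
 * could set ->private_data and then have it overwritten.
 */
static long example_ppp_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	struct ppp_file *pf;
	long err;

	mutex_lock(&ppp_mutex);

	pf = file->private_data;	/* dereferenced under the mutex */
	if (!pf)
		err = ppp_unattached_ioctl(current->nsproxy->net_ns,
					   pf, file, cmd, arg);
	else
		err = example_attached_ioctl(pf, file, cmd, arg);

	mutex_unlock(&ppp_mutex);
	return err;
}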
Fixes: f3ff8a4d80e8 ("ppp: push BKL down into the driver") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- drivers/net/ppp/ppp_generic.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 931836e09a6b..4fd861063ed4 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -575,7 +575,7 @@ static int get_filter(void __user *arg, struct sock_filter **p) static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - struct ppp_file *pf = file->private_data; + struct ppp_file *pf; struct ppp *ppp; int err = -EFAULT, val, val2, i; struct ppp_idle idle; @@ -585,9 +585,14 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) void __user *argp = (void __user *)arg; int __user *p = argp; - if (!pf) - return ppp_unattached_ioctl(current->nsproxy->net_ns, - pf, file, cmd, arg); + mutex_lock(&ppp_mutex); + + pf = file->private_data; + if (!pf) { + err = ppp_unattached_ioctl(current->nsproxy->net_ns, + pf, file, cmd, arg); + goto out; + } if (cmd == PPPIOCDETACH) { /* @@ -602,7 +607,6 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) * this fd and reopening /dev/ppp. */ err = -EINVAL; - mutex_lock(&ppp_mutex); if (pf->kind == INTERFACE) { ppp = PF_TO_PPP(pf); rtnl_lock(); @@ -616,15 +620,13 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } else pr_warn("PPPIOCDETACH file->f_count=%ld\n", atomic_long_read(&file->f_count)); - mutex_unlock(&ppp_mutex); - return err; + goto out; } if (pf->kind == CHANNEL) { struct channel *pch; struct ppp_channel *chan; - mutex_lock(&ppp_mutex); pch = PF_TO_CHANNEL(pf); switch (cmd) { @@ -646,17 +648,16 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) err = chan->ops->ioctl(chan, cmd, arg); up_read(&pch->chan_sem); } - mutex_unlock(&ppp_mutex); - return err; + goto out; } if (pf->kind != INTERFACE) { /* can't happen */ pr_err("PPP: not interface or channel??\n"); - return -EINVAL; + err = -EINVAL; + goto out; } - mutex_lock(&ppp_mutex); ppp = PF_TO_PPP(pf); switch (cmd) { case PPPIOCSMRU: @@ -831,7 +832,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) default: err = -ENOTTY; } + +out: mutex_unlock(&ppp_mutex); + return err; } @@ -844,7 +848,6 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct ppp_net *pn; int __user *p = (int __user *)arg; - mutex_lock(&ppp_mutex); switch (cmd) { case PPPIOCNEWUNIT: /* Create a new ppp unit */ @@ -894,7 +897,7 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, default: err = -ENOTTY; } - mutex_unlock(&ppp_mutex); + return err; } -- cgit v1.2.3 From 7091f01e8cf6989e63c4eacb59b654fcff057901 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 14 Mar 2016 17:30:37 -0700 Subject: phy: mdio-cavium: Add missing MODULE_* annotations. When the code was factored out of mdio-octeon.c, the MODULE_DESCRIPTION, MODULE_AUTHOR and MODULE_LICENSE annotations were inadvertently omitted. Restore them so that we don't get kernel taint warnings upon module loading. Signed-off-by: David Daney Signed-off-by: David S. 
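For context, the taint warning mentioned above comes from loading a module that lacks a license annotation. A minimal skeleton carrying the three annotations the patch restores might look like this (an illustrative module, not mdio-cavium itself):

/* Minimal illustrative module: without the MODULE_LICENSE() line below,
 * loading it taints the kernel and GPL-only symbols become unavailable
 * to it.
 */
#include <linux/module.h>

static int __init example_annotated_init(void)
{
	return 0;
}

static void __exit example_annotated_exit(void)
{
}

module_init(example_annotated_init);
module_exit(example_annotated_exit);

MODULE_DESCRIPTION("Example module carrying the required metadata");
MODULE_AUTHOR("Example Author");
MODULE_LICENSE("GPL");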
Miller --- drivers/net/phy/mdio-cavium.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/phy/mdio-cavium.c index e796ee121eac..6df2fa755bb4 100644 --- a/drivers/net/phy/mdio-cavium.c +++ b/drivers/net/phy/mdio-cavium.c @@ -147,3 +147,7 @@ int cavium_mdiobus_write(struct mii_bus *bus, int phy_id, int regnum, u16 val) return 0; } EXPORT_SYMBOL(cavium_mdiobus_write); + +MODULE_DESCRIPTION("Common code for OCTEON and Thunder MDIO bus drivers"); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); -- cgit v1.2.3 From 9277a4f875adbeeb6209c0a3e3cf04c752522b2e Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 14 Mar 2016 17:30:38 -0700 Subject: net: cavium: For Kconfig THUNDER_NIC_BGX, select MDIO_THUNDER. Previously we selected MDIO_OCTEON, which after creating the Thunder specific MDIO bus driver is much less useful. Signed-off-by: David Daney Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 8fb84e69c30e..953aa408c384 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -35,7 +35,7 @@ config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" depends on 64BIT select PHYLIB - select MDIO_OCTEON + select MDIO_THUNDER ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. -- cgit v1.2.3 From b7d3e3d3d21a09f8e45043d548224000a8efe06a Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 14 Mar 2016 17:30:39 -0700 Subject: net: thunderx: Don't leak phy device references on -EPROBE_DEFER condition. It is possible, although unlikely, that probing will find the phy_device for the first LMAC of a thunder BGX device, but then need to fail with -EPROBE_DEFER on a subsequent LMAC. In this case, we need to call put_device() on each of the phy_devices that were obtained, but will be unused due to returning -EPROBE_DEFER. Also, since we can break out of the probing loop early, we need to explicitly call of_node_put() outside of the loop. Signed-off-by: David Daney Signed-off-by: David S. Miller --- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 26 +++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index feed2318201b..967951582e03 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -974,17 +974,18 @@ static int bgx_init_acpi_phy(struct bgx *bgx) static int bgx_init_of_phy(struct bgx *bgx) { struct fwnode_handle *fwn; + struct device_node *node = NULL; u8 lmac = 0; - const char *mac; device_for_each_child_node(&bgx->pdev->dev, fwn) { struct phy_device *pd; struct device_node *phy_np; - struct device_node *node = to_of_node(fwn); + const char *mac; /* Should always be an OF node. But if it is not, we * cannot handle it, so exit the loop. 
*/ + node = to_of_node(fwn); if (!node) break; @@ -1005,17 +1006,30 @@ static int bgx_init_of_phy(struct bgx *bgx) /* Wait until the phy drivers are available */ pd = of_phy_find_device(phy_np); if (!pd) - return -EPROBE_DEFER; + goto defer; bgx->lmac[lmac].phydev = pd; } lmac++; - if (lmac == MAX_LMAC_PER_BGX) { - of_node_put(node); + if (lmac == MAX_LMAC_PER_BGX) break; - } } + of_node_put(node); return 0; + +defer: + /* We are bailing out, try not to leak device reference counts + * for phy devices we may have already found. + */ + while (lmac) { + if (bgx->lmac[lmac].phydev) { + put_device(&bgx->lmac[lmac].phydev->mdio.dev); + bgx->lmac[lmac].phydev = NULL; + } + lmac--; + } + of_node_put(node); + return -EPROBE_DEFER; } #else -- cgit v1.2.3 From f6773c5e95a6dc1af82157d4e96e412dee3abf31 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 Mar 2016 21:59:49 -0700 Subject: vlan: propagate gso_max_segs vlan drivers lack proper propagation of gso_max_segs from lower device. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- drivers/net/ipvlan/ipvlan_main.c | 2 ++ drivers/net/macvlan.c | 2 ++ net/8021q/vlan.c | 1 + net/8021q/vlan_dev.c | 1 + 4 files changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 5802b9025765..57941d3f4227 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -119,6 +119,7 @@ static int ipvlan_init(struct net_device *dev) dev->features = phy_dev->features & IPVLAN_FEATURES; dev->features |= NETIF_F_LLTX; dev->gso_max_size = phy_dev->gso_max_size; + dev->gso_max_segs = phy_dev->gso_max_segs; dev->hard_header_len = phy_dev->hard_header_len; ipvlan_set_lockdep_class(dev); @@ -592,6 +593,7 @@ static int ipvlan_device_event(struct notifier_block *unused, list_for_each_entry(ipvlan, &port->ipvlans, pnode) { ipvlan->dev->features = dev->features & IPVLAN_FEATURES; ipvlan->dev->gso_max_size = dev->gso_max_size; + ipvlan->dev->gso_max_segs = dev->gso_max_segs; netdev_features_change(ipvlan->dev); } break; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6e953e3a460a..2bcf1f321bea 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -803,6 +803,7 @@ static int macvlan_init(struct net_device *dev) dev->hw_features |= NETIF_F_LRO; dev->vlan_features = lowerdev->vlan_features & MACVLAN_FEATURES; dev->gso_max_size = lowerdev->gso_max_size; + dev->gso_max_segs = lowerdev->gso_max_segs; dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); @@ -1532,6 +1533,7 @@ static int macvlan_device_event(struct notifier_block *unused, case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { vlan->dev->gso_max_size = dev->gso_max_size; + vlan->dev->gso_max_segs = dev->gso_max_segs; netdev_update_features(vlan->dev); } break; diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 69929c05c843..a1e273af6fc8 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -311,6 +311,7 @@ static void vlan_transfer_features(struct net_device *dev, struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev); vlandev->gso_max_size = dev->gso_max_size; + vlandev->gso_max_segs = dev->gso_max_segs; if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto)) vlandev->hard_header_len = dev->hard_header_len; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index e416a4038a12..e7e62570bdb8 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -551,6 +551,7 @@ static int vlan_dev_init(struct net_device 
*dev) dev->features |= real_dev->vlan_features | NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; dev->gso_max_size = real_dev->gso_max_size; + dev->gso_max_segs = real_dev->gso_max_segs; if (dev->features & NETIF_F_VLAN_FEATURES) netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n"); -- cgit v1.2.3 From bd9e33508c5e1eb5d807d11d7bfc52125fcdb04e Mon Sep 17 00:00:00 2001 From: Schemmel Hans-Christoph Date: Thu, 17 Mar 2016 10:07:56 +0000 Subject: qmi_wwan: Added support for Gemalto's Cinterion PHxx WWAN interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added support for Gemalto's Cinterion PHxx WWAN interfaces by adding QMI_FIXED_INTF with Cinterion's VID and PID. PHxx can have: 2 RmNet Interfaces (PID 0x0082) or 1 RmNet + 1 USB Audio interface (PID 0x0083). Signed-off-by: Hans-Christoph Schemmel Acked-by: Bjørn Mork Signed-off-by: David S. Miller --- drivers/net/usb/qmi_wwan.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers') diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index a3a4ccf7cf52..7d717c66bcb0 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -881,6 +881,9 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ + {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ -- cgit v1.2.3 From d07a147f00de303c799af3427d4e2d612902a7d3 Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 14 Mar 2016 17:57:08 -0700 Subject: netdev: Move octeon/octeon_mgmt driver to cavium directory. No code changes. Since OCTEON is a Cavium product, move the driver to the vendor directory to unclutter things a bit. Signed-off-by: David Daney Signed-off-by: David S. 
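The gso_max_segs propagation added above for ipvlan, macvlan and 802.1q follows one pattern: a stacked device mirrors the lower device's GSO limits at init time and again whenever the lower device's features change. A hedged sketch of that pattern, written as a hypothetical helper rather than anything in the patch:

/* Illustrative helper (not part of the patch): mirror the lower device's
 * GSO limits onto the stacked device, then re-evaluate its features, as
 * the NETDEV_FEAT_CHANGE handlers above do.
 */
static void example_inherit_gso_limits(struct net_device *upper,
				       const struct net_device *lower)
{
	upper->gso_max_size = lower->gso_max_size;
	upper->gso_max_segs = lower->gso_max_segs;
	netdev_update_features(upper);
}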
Miller --- drivers/net/ethernet/Kconfig | 1 - drivers/net/ethernet/Makefile | 1 - drivers/net/ethernet/cavium/Kconfig | 11 + drivers/net/ethernet/cavium/Makefile | 1 + drivers/net/ethernet/cavium/octeon/Makefile | 5 + drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 1593 ++++++++++++++++++++++ drivers/net/ethernet/octeon/Kconfig | 14 - drivers/net/ethernet/octeon/Makefile | 5 - drivers/net/ethernet/octeon/octeon_mgmt.c | 1593 ---------------------- 9 files changed, 1610 insertions(+), 1614 deletions(-) create mode 100644 drivers/net/ethernet/cavium/octeon/Makefile create mode 100644 drivers/net/ethernet/cavium/octeon/octeon_mgmt.c delete mode 100644 drivers/net/ethernet/octeon/Kconfig delete mode 100644 drivers/net/ethernet/octeon/Makefile delete mode 100644 drivers/net/ethernet/octeon/octeon_mgmt.c (limited to 'drivers') diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index be67a19e01b9..2ffd63463299 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -139,7 +139,6 @@ config NET_NETX source "drivers/net/ethernet/nuvoton/Kconfig" source "drivers/net/ethernet/nvidia/Kconfig" source "drivers/net/ethernet/nxp/Kconfig" -source "drivers/net/ethernet/octeon/Kconfig" source "drivers/net/ethernet/oki-semi/Kconfig" config ETHOC diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 6ffcc801d37e..1d349e9aa9a6 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -59,7 +59,6 @@ obj-$(CONFIG_NET_NETX) += netx-eth.o obj-$(CONFIG_NET_VENDOR_NUVOTON) += nuvoton/ obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/ obj-$(CONFIG_LPC_ENET) += nxp/ -obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/ obj-$(CONFIG_ETHOC) += ethoc.o obj-$(CONFIG_NET_PACKET_ENGINE) += packetengines/ diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 953aa408c384..0ef232d3331e 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -53,4 +53,15 @@ config LIQUIDIO To compile this driver as a module, choose M here: the module will be called liquidio. This is recommended. +config OCTEON_MGMT_ETHERNET + tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" + depends on CAVIUM_OCTEON_SOC + select PHYLIB + select MDIO_OCTEON + default y + help + Enable the ethernet driver for the management + port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, + CN54XX, CN52XX, and CN6XXX chips. + endif # NET_VENDOR_CAVIUM diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile index d22f886ac291..872da9f7c31a 100644 --- a/drivers/net/ethernet/cavium/Makefile +++ b/drivers/net/ethernet/cavium/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/ obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/ +obj-$(CONFIG_NET_VENDOR_CAVIUM) += octeon/ diff --git a/drivers/net/ethernet/cavium/octeon/Makefile b/drivers/net/ethernet/cavium/octeon/Makefile new file mode 100644 index 000000000000..efa41c1d91c5 --- /dev/null +++ b/drivers/net/ethernet/cavium/octeon/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the Cavium network device drivers. 
+# + +obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c new file mode 100644 index 000000000000..c177c7cec13b --- /dev/null +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -0,0 +1,1593 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2009-2012 Cavium, Inc + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRV_NAME "octeon_mgmt" +#define DRV_VERSION "2.0" +#define DRV_DESCRIPTION \ + "Cavium Networks Octeon MII (management) port Network Driver" + +#define OCTEON_MGMT_NAPI_WEIGHT 16 + +/* Ring sizes that are powers of two allow for more efficient modulo + * opertions. + */ +#define OCTEON_MGMT_RX_RING_SIZE 512 +#define OCTEON_MGMT_TX_RING_SIZE 128 + +/* Allow 8 bytes for vlan and FCS. */ +#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + +union mgmt_port_ring_entry { + u64 d64; + struct { +#define RING_ENTRY_CODE_DONE 0xf +#define RING_ENTRY_CODE_MORE 0x10 +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved_62_63:2; + /* Length of the buffer/packet in bytes */ + u64 len:14; + /* For TX, signals that the packet should be timestamped */ + u64 tstamp:1; + /* The RX error code */ + u64 code:7; + /* Physical address of the buffer */ + u64 addr:40; +#else + u64 addr:40; + u64 code:7; + u64 tstamp:1; + u64 len:14; + u64 reserved_62_63:2; +#endif + } s; +}; + +#define MIX_ORING1 0x0 +#define MIX_ORING2 0x8 +#define MIX_IRING1 0x10 +#define MIX_IRING2 0x18 +#define MIX_CTL 0x20 +#define MIX_IRHWM 0x28 +#define MIX_IRCNT 0x30 +#define MIX_ORHWM 0x38 +#define MIX_ORCNT 0x40 +#define MIX_ISR 0x48 +#define MIX_INTENA 0x50 +#define MIX_REMCNT 0x58 +#define MIX_BIST 0x78 + +#define AGL_GMX_PRT_CFG 0x10 +#define AGL_GMX_RX_FRM_CTL 0x18 +#define AGL_GMX_RX_FRM_MAX 0x30 +#define AGL_GMX_RX_JABBER 0x38 +#define AGL_GMX_RX_STATS_CTL 0x50 + +#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0 +#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8 +#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0 + +#define AGL_GMX_RX_ADR_CTL 0x100 +#define AGL_GMX_RX_ADR_CAM_EN 0x108 +#define AGL_GMX_RX_ADR_CAM0 0x180 +#define AGL_GMX_RX_ADR_CAM1 0x188 +#define AGL_GMX_RX_ADR_CAM2 0x190 +#define AGL_GMX_RX_ADR_CAM3 0x198 +#define AGL_GMX_RX_ADR_CAM4 0x1a0 +#define AGL_GMX_RX_ADR_CAM5 0x1a8 + +#define AGL_GMX_TX_CLK 0x208 +#define AGL_GMX_TX_STATS_CTL 0x268 +#define AGL_GMX_TX_CTL 0x270 +#define AGL_GMX_TX_STAT0 0x280 +#define AGL_GMX_TX_STAT1 0x288 +#define AGL_GMX_TX_STAT2 0x290 +#define AGL_GMX_TX_STAT3 0x298 +#define AGL_GMX_TX_STAT4 0x2a0 +#define AGL_GMX_TX_STAT5 0x2a8 +#define AGL_GMX_TX_STAT6 0x2b0 +#define AGL_GMX_TX_STAT7 0x2b8 +#define AGL_GMX_TX_STAT8 0x2c0 +#define AGL_GMX_TX_STAT9 0x2c8 + +struct octeon_mgmt { + struct net_device *netdev; + u64 mix; + u64 agl; + u64 agl_prt_ctl; + int port; + int irq; + bool has_rx_tstamp; + u64 *tx_ring; + dma_addr_t tx_ring_handle; + unsigned int tx_next; + unsigned int tx_next_clean; + unsigned int tx_current_fill; + /* The tx_list lock also protects the ring related variables */ + struct sk_buff_head tx_list; + + /* RX variables only touched in napi_poll. No locking necessary. 
*/ + u64 *rx_ring; + dma_addr_t rx_ring_handle; + unsigned int rx_next; + unsigned int rx_next_fill; + unsigned int rx_current_fill; + struct sk_buff_head rx_list; + + spinlock_t lock; + unsigned int last_duplex; + unsigned int last_link; + unsigned int last_speed; + struct device *dev; + struct napi_struct napi; + struct tasklet_struct tx_clean_tasklet; + struct phy_device *phydev; + struct device_node *phy_np; + resource_size_t mix_phys; + resource_size_t mix_size; + resource_size_t agl_phys; + resource_size_t agl_size; + resource_size_t agl_prt_ctl_phys; + resource_size_t agl_prt_ctl_size; +}; + +static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) +{ + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); + mix_intena.s.ithena = enable ? 1 : 0; + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) +{ + union cvmx_mixx_intena mix_intena; + unsigned long flags; + + spin_lock_irqsave(&p->lock, flags); + mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); + mix_intena.s.othena = enable ? 1 : 0; + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); + spin_unlock_irqrestore(&p->lock, flags); +} + +static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 1); +} + +static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_rx_irq(p, 0); +} + +static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 1); +} + +static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) +{ + octeon_mgmt_set_tx_irq(p, 0); +} + +static unsigned int ring_max_fill(unsigned int ring_size) +{ + return ring_size - 8; +} + +static unsigned int ring_size_to_bytes(unsigned int ring_size) +{ + return ring_size * sizeof(union mgmt_port_ring_entry); +} + +static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { + unsigned int size; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + + /* CN56XX pass 1 needs 8 bytes of padding. */ + size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; + + skb = netdev_alloc_skb(netdev, size); + if (!skb) + break; + skb_reserve(skb, NET_IP_ALIGN); + __skb_queue_tail(&p->rx_list, skb); + + re.d64 = 0; + re.s.len = size; + re.s.addr = dma_map_single(p->dev, skb->data, + size, + DMA_FROM_DEVICE); + + /* Put it in the ring. */ + p->rx_ring[p->rx_next_fill] = re.d64; + dma_sync_single_for_device(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->rx_next_fill = + (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill++; + /* Ring the bell. 
*/ + cvmx_write_csr(p->mix + MIX_IRING2, 1); + } +} + +static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) +{ + union cvmx_mixx_orcnt mix_orcnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + int cleaned = 0; + unsigned long flags; + + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); + while (mix_orcnt.s.orcnt) { + spin_lock_irqsave(&p->tx_list.lock, flags); + + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); + + if (mix_orcnt.s.orcnt == 0) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + break; + } + + dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + re.d64 = p->tx_ring[p->tx_next_clean]; + p->tx_next_clean = + (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; + skb = __skb_dequeue(&p->tx_list); + + mix_orcnt.u64 = 0; + mix_orcnt.s.orcnt = 1; + + /* Acknowledge to hardware that we have the buffer. */ + cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); + p->tx_current_fill--; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + + /* Read the hardware TX timestamp if one was recorded */ + if (unlikely(re.s.tstamp)) { + struct skb_shared_hwtstamps ts; + u64 ns; + + memset(&ts, 0, sizeof(ts)); + /* Read the timestamp */ + ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); + /* Remove the timestamp from the FIFO */ + cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); + /* Tell the kernel about the timestamp */ + ts.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(skb, &ts); + } + + dev_kfree_skb_any(skb); + cleaned++; + + mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); + } + + if (cleaned && netif_queue_stopped(p->netdev)) + netif_wake_queue(p->netdev); +} + +static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) +{ + struct octeon_mgmt *p = (struct octeon_mgmt *)arg; + octeon_mgmt_clean_tx_buffers(p); + octeon_mgmt_enable_tx_irq(p); +} + +static void octeon_mgmt_update_rx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + unsigned long flags; + u64 drop, bad; + + /* These reads also clear the count registers. */ + drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); + bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); + + if (drop || bad) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.rx_errors += bad; + netdev->stats.rx_dropped += drop; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +static void octeon_mgmt_update_tx_stats(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + unsigned long flags; + + union cvmx_agl_gmx_txx_stat0 s0; + union cvmx_agl_gmx_txx_stat1 s1; + + /* These reads also clear the count registers. */ + s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); + s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); + + if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { + /* Do an atomic update. */ + spin_lock_irqsave(&p->lock, flags); + netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; + netdev->stats.collisions += s1.s.scol + s1.s.mcol; + spin_unlock_irqrestore(&p->lock, flags); + } +} + +/* + * Dequeue a receive skb and its corresponding ring entry. The ring + * entry is returned, *pskb is updated to point to the skb. 
+ */ +static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, + struct sk_buff **pskb) +{ + union mgmt_port_ring_entry re; + + dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + re.d64 = p->rx_ring[p->rx_next]; + p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; + p->rx_current_fill--; + *pskb = __skb_dequeue(&p->rx_list); + + dma_unmap_single(p->dev, re.s.addr, + ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, + DMA_FROM_DEVICE); + + return re.d64; +} + + +static int octeon_mgmt_receive_one(struct octeon_mgmt *p) +{ + struct net_device *netdev = p->netdev; + union cvmx_mixx_ircnt mix_ircnt; + union mgmt_port_ring_entry re; + struct sk_buff *skb; + struct sk_buff *skb2; + struct sk_buff *skb_new; + union mgmt_port_ring_entry re2; + int rc = 1; + + + re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); + if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { + /* A good packet, send it up. */ + skb_put(skb, re.s.len); +good: + /* Process the RX timestamp if it was recorded */ + if (p->has_rx_tstamp) { + /* The first 8 bytes are the timestamp */ + u64 ns = *(u64 *)skb->data; + struct skb_shared_hwtstamps *ts; + ts = skb_hwtstamps(skb); + ts->hwtstamp = ns_to_ktime(ns); + __skb_pull(skb, 8); + } + skb->protocol = eth_type_trans(skb, netdev); + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += skb->len; + netif_receive_skb(skb); + rc = 0; + } else if (re.s.code == RING_ENTRY_CODE_MORE) { + /* Packet split across skbs. This can happen if we + * increase the MTU. Buffers that are already in the + * rx ring can then end up being too small. As the rx + * ring is refilled, buffers sized for the new MTU + * will be used and we should go back to the normal + * non-split case. + */ + skb_put(skb, re.s.len); + do { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + if (re2.s.code != RING_ENTRY_CODE_MORE + && re2.s.code != RING_ENTRY_CODE_DONE) + goto split_error; + skb_put(skb2, re2.s.len); + skb_new = skb_copy_expand(skb, 0, skb2->len, + GFP_ATOMIC); + if (!skb_new) + goto split_error; + if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), + skb2->len)) + goto split_error; + skb_put(skb_new, skb2->len); + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + skb = skb_new; + } while (re2.s.code == RING_ENTRY_CODE_MORE); + goto good; + } else { + /* Some other error, discard it. */ + dev_kfree_skb_any(skb); + /* Error statistics are accumulated in + * octeon_mgmt_update_rx_stats. + */ + } + goto done; +split_error: + /* Discard the whole mess. */ + dev_kfree_skb_any(skb); + dev_kfree_skb_any(skb2); + while (re2.s.code == RING_ENTRY_CODE_MORE) { + re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); + dev_kfree_skb_any(skb2); + } + netdev->stats.rx_errors++; + +done: + /* Tell the hardware we processed a packet. */ + mix_ircnt.u64 = 0; + mix_ircnt.s.ircnt = 1; + cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); + return rc; +} + +static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) +{ + unsigned int work_done = 0; + union cvmx_mixx_ircnt mix_ircnt; + int rc; + + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); + while (work_done < budget && mix_ircnt.s.ircnt) { + + rc = octeon_mgmt_receive_one(p); + if (!rc) + work_done++; + + /* Check for more packets. 
*/ + mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); + } + + octeon_mgmt_rx_fill_ring(p->netdev); + + return work_done; +} + +static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); + struct net_device *netdev = p->netdev; + unsigned int work_done = 0; + + work_done = octeon_mgmt_receive_packets(p, budget); + + if (work_done < budget) { + /* We stopped because no more packets were available. */ + napi_complete(napi); + octeon_mgmt_enable_rx_irq(p); + } + octeon_mgmt_update_rx_stats(netdev); + + return work_done; +} + +/* Reset the hardware to clean state. */ +static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) +{ + union cvmx_mixx_ctl mix_ctl; + union cvmx_mixx_bist mix_bist; + union cvmx_agl_gmx_bist agl_gmx_bist; + + mix_ctl.u64 = 0; + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); + } while (mix_ctl.s.busy); + mix_ctl.s.reset = 1; + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + cvmx_read_csr(p->mix + MIX_CTL); + octeon_io_clk_delay(64); + + mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); + if (mix_bist.u64) + dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", + (unsigned long long)mix_bist.u64); + + agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); + if (agl_gmx_bist.u64) + dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", + (unsigned long long)agl_gmx_bist.u64); +} + +struct octeon_mgmt_cam_state { + u64 cam[6]; + u64 cam_mask; + int cam_index; +}; + +static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, + unsigned char *addr) +{ + int i; + + for (i = 0; i < 6; i++) + cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); + cs->cam_mask |= (1ULL << cs->cam_index); + cs->cam_index++; +} + +static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; + union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; + unsigned long flags; + unsigned int prev_packet_enable; + unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ + unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ + struct octeon_mgmt_cam_state cam_state; + struct netdev_hw_addr *ha; + int available_cam_entries; + + memset(&cam_state, 0, sizeof(cam_state)); + + if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { + cam_mode = 0; + available_cam_entries = 8; + } else { + /* One CAM entry for the primary address, leaves seven + * for the secondary addresses. + */ + available_cam_entries = 7 - netdev->uc.count; + } + + if (netdev->flags & IFF_MULTICAST) { + if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || + netdev_mc_count(netdev) > available_cam_entries) + multicast_mode = 2; /* 2 - Accept all multicast. */ + else + multicast_mode = 0; /* 0 - Use CAM. */ + } + + if (cam_mode == 1) { + /* Add primary address. */ + octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); + netdev_for_each_uc_addr(ha, netdev) + octeon_mgmt_cam_state_add(&cam_state, ha->addr); + } + if (multicast_mode == 0) { + netdev_for_each_mc_addr(ha, netdev) + octeon_mgmt_cam_state_add(&cam_state, ha->addr); + } + + spin_lock_irqsave(&p->lock, flags); + + /* Disable packet I/O. 
*/ + agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prev_packet_enable = agl_gmx_prtx.s.en; + agl_gmx_prtx.s.en = 0; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); + + adr_ctl.u64 = 0; + adr_ctl.s.cam_mode = cam_mode; + adr_ctl.s.mcst = multicast_mode; + adr_ctl.s.bcst = 1; /* Allow broadcast */ + + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); + + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); + cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); + + /* Restore packet I/O. */ + agl_gmx_prtx.s.en = prev_packet_enable; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); + + spin_unlock_irqrestore(&p->lock, flags); +} + +static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) +{ + int r = eth_mac_addr(netdev, addr); + + if (r) + return r; + + octeon_mgmt_set_rx_filtering(netdev); + + return 0; +} + +static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; + + /* Limit the MTU to make sure the ethernet packets are between + * 64 bytes and 16383 bytes. + */ + if (size_without_fcs < 64 || size_without_fcs > 16383) { + dev_warn(p->dev, "MTU must be between %d and %d.\n", + 64 - OCTEON_MGMT_RX_HEADROOM, + 16383 - OCTEON_MGMT_RX_HEADROOM); + return -EINVAL; + } + + netdev->mtu = new_mtu; + + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); + cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, + (size_without_fcs + 7) & 0xfff8); + + return 0; +} + +static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct octeon_mgmt *p = netdev_priv(netdev); + union cvmx_mixx_isr mixx_isr; + + mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); + + /* Clear any pending interrupts */ + cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); + cvmx_read_csr(p->mix + MIX_ISR); + + if (mixx_isr.s.irthresh) { + octeon_mgmt_disable_rx_irq(p); + napi_schedule(&p->napi); + } + if (mixx_isr.s.orthresh) { + octeon_mgmt_disable_tx_irq(p); + tasklet_schedule(&p->tx_clean_tasklet); + } + + return IRQ_HANDLED; +} + +static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + struct hwtstamp_config config; + union cvmx_mio_ptp_clock_cfg ptp; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + bool have_hw_timestamps = false; + + if (copy_from_user(&config, rq->ifr_data, sizeof(config))) + return -EFAULT; + + if (config.flags) /* reserved for future extensions */ + return -EINVAL; + + /* Check the status of hardware for tiemstamps */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + /* Get the current state of the PTP clock */ + ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); + if (!ptp.s.ext_clk_en) { + /* The clock has not been configured to use an + * external source. Program it to use the main clock + * reference. 
+ */ + u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); + if (!ptp.s.ptp_en) + cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); + pr_info("PTP Clock: Using sclk reference at %lld Hz\n", + (NSEC_PER_SEC << 32) / clock_comp); + } else { + /* The clock is already programmed to use a GPIO */ + u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); + pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", + ptp.s.ext_clk_in, + (NSEC_PER_SEC << 32) / clock_comp); + } + + /* Enable the clock if it wasn't done already */ + if (!ptp.s.ptp_en) { + ptp.s.ptp_en = 1; + cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); + } + have_hw_timestamps = true; + } + + if (!have_hw_timestamps) + return -EINVAL; + + switch (config.tx_type) { + case HWTSTAMP_TX_OFF: + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_NONE: + p->has_rx_tstamp = false; + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 0; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + p->has_rx_tstamp = have_hw_timestamps; + config.rx_filter = HWTSTAMP_FILTER_ALL; + if (p->has_rx_tstamp) { + rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); + rxx_frm_ctl.s.ptp_mode = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + } + break; + default: + return -ERANGE; + } + + if (copy_to_user(rq->ifr_data, &config, sizeof(config))) + return -EFAULT; + + return 0; +} + +static int octeon_mgmt_ioctl(struct net_device *netdev, + struct ifreq *rq, int cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + switch (cmd) { + case SIOCSHWTSTAMP: + return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); + default: + if (p->phydev) + return phy_mii_ioctl(p->phydev, rq, cmd); + return -EINVAL; + } +} + +static void octeon_mgmt_disable_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + /* Disable GMX before we make any changes. 
*/ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prtx_cfg.s.en = 0; + prtx_cfg.s.tx_en = 0; + prtx_cfg.s.rx_en = 0; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + int i; + for (i = 0; i < 10; i++) { + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) + break; + mdelay(1); + i++; + } + } +} + +static void octeon_mgmt_enable_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + /* Restore the GMX enable state only if link is set */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + prtx_cfg.s.tx_en = 1; + prtx_cfg.s.rx_en = 1; + prtx_cfg.s.en = 1; + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); +} + +static void octeon_mgmt_update_link(struct octeon_mgmt *p) +{ + union cvmx_agl_gmx_prtx_cfg prtx_cfg; + + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + + if (!p->phydev->link) + prtx_cfg.s.duplex = 1; + else + prtx_cfg.s.duplex = p->phydev->duplex; + + switch (p->phydev->speed) { + case 10: + prtx_cfg.s.speed = 0; + prtx_cfg.s.slottime = 0; + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.burst = 1; + prtx_cfg.s.speed_msb = 1; + } + break; + case 100: + prtx_cfg.s.speed = 0; + prtx_cfg.s.slottime = 0; + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.burst = 1; + prtx_cfg.s.speed_msb = 0; + } + break; + case 1000: + /* 1000 MBits is only supported on 6XXX chips */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + prtx_cfg.s.speed = 1; + prtx_cfg.s.speed_msb = 0; + /* Only matters for half-duplex */ + prtx_cfg.s.slottime = 1; + prtx_cfg.s.burst = p->phydev->duplex; + } + break; + case 0: /* No link */ + default: + break; + } + + /* Write the new GMX setting with the port still disabled. */ + cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); + + /* Read GMX CFG again to make sure the config is completed. */ + prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); + + if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + union cvmx_agl_gmx_txx_clk agl_clk; + union cvmx_agl_prtx_ctl prtx_ctl; + + prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); + /* MII (both speeds) and RGMII 1000 speed. */ + agl_clk.s.clk_cnt = 1; + if (prtx_ctl.s.mode == 0) { /* RGMII mode */ + if (p->phydev->speed == 10) + agl_clk.s.clk_cnt = 50; + else if (p->phydev->speed == 100) + agl_clk.s.clk_cnt = 5; + } + cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); + } +} + +static void octeon_mgmt_adjust_link(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + unsigned long flags; + int link_changed = 0; + + if (!p->phydev) + return; + + spin_lock_irqsave(&p->lock, flags); + + + if (!p->phydev->link && p->last_link) + link_changed = -1; + + if (p->phydev->link + && (p->last_duplex != p->phydev->duplex + || p->last_link != p->phydev->link + || p->last_speed != p->phydev->speed)) { + octeon_mgmt_disable_link(p); + link_changed = 1; + octeon_mgmt_update_link(p); + octeon_mgmt_enable_link(p); + } + + p->last_link = p->phydev->link; + p->last_speed = p->phydev->speed; + p->last_duplex = p->phydev->duplex; + + spin_unlock_irqrestore(&p->lock, flags); + + if (link_changed != 0) { + if (link_changed > 0) { + pr_info("%s: Link is up - %d/%s\n", netdev->name, + p->phydev->speed, + DUPLEX_FULL == p->phydev->duplex ? 
+ "Full" : "Half"); + } else { + pr_info("%s: Link is down\n", netdev->name); + } + } +} + +static int octeon_mgmt_init_phy(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (octeon_is_simulation() || p->phy_np == NULL) { + /* No PHYs in the simulator. */ + netif_carrier_on(netdev); + return 0; + } + + p->phydev = of_phy_connect(netdev, p->phy_np, + octeon_mgmt_adjust_link, 0, + PHY_INTERFACE_MODE_MII); + + if (!p->phydev) + return -ENODEV; + + return 0; +} + +static int octeon_mgmt_open(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + union cvmx_mixx_ctl mix_ctl; + union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; + union cvmx_mixx_oring1 oring1; + union cvmx_mixx_iring1 iring1; + union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; + union cvmx_mixx_irhwm mix_irhwm; + union cvmx_mixx_orhwm mix_orhwm; + union cvmx_mixx_intena mix_intena; + struct sockaddr sa; + + /* Allocate ring buffers. */ + p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + GFP_KERNEL); + if (!p->tx_ring) + return -ENOMEM; + p->tx_ring_handle = + dma_map_single(p->dev, p->tx_ring, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + p->tx_next = 0; + p->tx_next_clean = 0; + p->tx_current_fill = 0; + + + p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + GFP_KERNEL); + if (!p->rx_ring) + goto err_nomem; + p->rx_ring_handle = + dma_map_single(p->dev, p->rx_ring, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + + p->rx_next = 0; + p->rx_next_fill = 0; + p->rx_current_fill = 0; + + octeon_mgmt_reset_hw(p); + + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); + + /* Bring it out of reset if needed. */ + if (mix_ctl.s.reset) { + mix_ctl.s.reset = 0; + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + do { + mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); + } while (mix_ctl.s.reset); + } + + if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { + agl_gmx_inf_mode.u64 = 0; + agl_gmx_inf_mode.s.en = 1; + cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); + } + if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) + || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { + /* Force compensation values, as they are not + * determined properly by HW + */ + union cvmx_agl_gmx_drv_ctl drv_ctl; + + drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); + if (p->port) { + drv_ctl.s.byp_en1 = 1; + drv_ctl.s.nctl1 = 6; + drv_ctl.s.pctl1 = 6; + } else { + drv_ctl.s.byp_en = 1; + drv_ctl.s.nctl = 6; + drv_ctl.s.pctl = 6; + } + cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); + } + + oring1.u64 = 0; + oring1.s.obase = p->tx_ring_handle >> 3; + oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; + cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); + + iring1.u64 = 0; + iring1.s.ibase = p->rx_ring_handle >> 3; + iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; + cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); + + memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); + octeon_mgmt_set_mac_address(netdev, &sa); + + octeon_mgmt_change_mtu(netdev, netdev->mtu); + + /* Enable the port HW. Packets are not allowed until + * cvmx_mgmt_port_enable() is called. + */ + mix_ctl.u64 = 0; + mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ + mix_ctl.s.en = 1; /* Enable the port */ + mix_ctl.s.nbtarb = 0; /* Arbitration mode */ + /* MII CB-request FIFO programmable high watermark */ + mix_ctl.s.mrq_hwm = 1; +#ifdef __LITTLE_ENDIAN + mix_ctl.s.lendian = 1; +#endif + cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); + + /* Read the PHY to find the mode of the interface. 
*/ + if (octeon_mgmt_init_phy(netdev)) { + dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); + goto err_noirq; + } + + /* Set the mode of the interface, RGMII/MII. */ + if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { + union cvmx_agl_prtx_ctl agl_prtx_ctl; + int rgmii_mode = (p->phydev->supported & + (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; + + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + + /* MII clocks counts are based on the 125Mhz + * reference, which has an 8nS period. So our delays + * need to be multiplied by this factor. + */ +#define NS_PER_PHY_CLK 8 + + /* Take the DLL and clock tree out of reset */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.clkrst = 0; + if (rgmii_mode) { + agl_prtx_ctl.s.dllrst = 0; + agl_prtx_ctl.s.clktx_byp = 0; + } + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ + + /* Wait for the DLL to lock. External 125 MHz + * reference clock must be stable at this point. + */ + ndelay(256 * NS_PER_PHY_CLK); + + /* Enable the interface */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + agl_prtx_ctl.s.enable = 1; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + + /* Read the value back to force the previous write */ + agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); + + /* Enable the compensation controller */ + agl_prtx_ctl.s.comp = 1; + agl_prtx_ctl.s.drv_byp = 0; + cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); + /* Force write out before wait. */ + cvmx_read_csr(p->agl_prt_ctl); + + /* For compensation state to lock. */ + ndelay(1040 * NS_PER_PHY_CLK); + + /* Default Interframe Gaps are too small. Recommended + * workaround is. + * + * AGL_GMX_TX_IFG[IFG1]=14 + * AGL_GMX_TX_IFG[IFG2]=10 + */ + cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); + } + + octeon_mgmt_rx_fill_ring(netdev); + + /* Clear statistics. */ + /* Clear on read. */ + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); + cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); + + cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); + cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); + + /* Clear any pending interrupts */ + cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); + + if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, + netdev)) { + dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); + goto err_noirq; + } + + /* Interrupt every single RX packet */ + mix_irhwm.u64 = 0; + mix_irhwm.s.irhwm = 0; + cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); + + /* Interrupt when we have 1 or more packets to clean. */ + mix_orhwm.u64 = 0; + mix_orhwm.s.orhwm = 0; + cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); + + /* Enable receive and transmit interrupts */ + mix_intena.u64 = 0; + mix_intena.s.ithena = 1; + mix_intena.s.othena = 1; + cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); + + /* Enable packet I/O. */ + + rxx_frm_ctl.u64 = 0; + rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; + rxx_frm_ctl.s.pre_align = 1; + /* When set, disables the length check for non-min sized pkts + * with padding in the client data. 
+ */ + rxx_frm_ctl.s.pad_len = 1; + /* When set, disables the length check for VLAN pkts */ + rxx_frm_ctl.s.vlan_len = 1; + /* When set, PREAMBLE checking is less strict */ + rxx_frm_ctl.s.pre_free = 1; + /* Control Pause Frames can match station SMAC */ + rxx_frm_ctl.s.ctl_smac = 0; + /* Control Pause Frames can match globally assign Multicast address */ + rxx_frm_ctl.s.ctl_mcst = 1; + /* Forward pause information to TX block */ + rxx_frm_ctl.s.ctl_bck = 1; + /* Drop Control Pause Frames */ + rxx_frm_ctl.s.ctl_drp = 1; + /* Strip off the preamble */ + rxx_frm_ctl.s.pre_strp = 1; + /* This port is configured to send PREAMBLE+SFD to begin every + * frame. GMX checks that the PREAMBLE is sent correctly. + */ + rxx_frm_ctl.s.pre_chk = 1; + cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); + + /* Configure the port duplex, speed and enables */ + octeon_mgmt_disable_link(p); + if (p->phydev) + octeon_mgmt_update_link(p); + octeon_mgmt_enable_link(p); + + p->last_link = 0; + p->last_speed = 0; + /* PHY is not present in simulator. The carrier is enabled + * while initializing the phy for simulator, leave it enabled. + */ + if (p->phydev) { + netif_carrier_off(netdev); + phy_start_aneg(p->phydev); + } + + netif_wake_queue(netdev); + napi_enable(&p->napi); + + return 0; +err_noirq: + octeon_mgmt_reset_hw(p); + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); +err_nomem: + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + return -ENOMEM; +} + +static int octeon_mgmt_stop(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + napi_disable(&p->napi); + netif_stop_queue(netdev); + + if (p->phydev) + phy_disconnect(p->phydev); + p->phydev = NULL; + + netif_carrier_off(netdev); + + octeon_mgmt_reset_hw(p); + + free_irq(p->irq, netdev); + + /* dma_unmap is a nop on Octeon, so just free everything. */ + skb_queue_purge(&p->tx_list); + skb_queue_purge(&p->rx_list); + + dma_unmap_single(p->dev, p->rx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->rx_ring); + + dma_unmap_single(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + kfree(p->tx_ring); + + return 0; +} + +static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + union mgmt_port_ring_entry re; + unsigned long flags; + int rv = NETDEV_TX_BUSY; + + re.d64 = 0; + re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); + re.s.len = skb->len; + re.s.addr = dma_map_single(p->dev, skb->data, + skb->len, + DMA_TO_DEVICE); + + spin_lock_irqsave(&p->tx_list.lock, flags); + + if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + netif_stop_queue(netdev); + spin_lock_irqsave(&p->tx_list.lock, flags); + } + + if (unlikely(p->tx_current_fill >= + ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { + spin_unlock_irqrestore(&p->tx_list.lock, flags); + dma_unmap_single(p->dev, re.s.addr, re.s.len, + DMA_TO_DEVICE); + goto out; + } + + __skb_queue_tail(&p->tx_list, skb); + + /* Put it in the ring. 
*/ + p->tx_ring[p->tx_next] = re.d64; + p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; + p->tx_current_fill++; + + spin_unlock_irqrestore(&p->tx_list.lock, flags); + + dma_sync_single_for_device(p->dev, p->tx_ring_handle, + ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), + DMA_BIDIRECTIONAL); + + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += skb->len; + + /* Ring the bell. */ + cvmx_write_csr(p->mix + MIX_ORING2, 1); + + netdev->trans_start = jiffies; + rv = NETDEV_TX_OK; +out: + octeon_mgmt_update_tx_stats(netdev); + return rv; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void octeon_mgmt_poll_controller(struct net_device *netdev) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + octeon_mgmt_receive_packets(p, 16); + octeon_mgmt_update_rx_stats(netdev); +} +#endif + +static void octeon_mgmt_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); + strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); +} + +static int octeon_mgmt_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (p->phydev) + return phy_ethtool_gset(p->phydev, cmd); + + return -EOPNOTSUPP; +} + +static int octeon_mgmt_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct octeon_mgmt *p = netdev_priv(netdev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_ethtool_sset(p->phydev, cmd); + + return -EOPNOTSUPP; +} + +static int octeon_mgmt_nway_reset(struct net_device *dev) +{ + struct octeon_mgmt *p = netdev_priv(dev); + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (p->phydev) + return phy_start_aneg(p->phydev); + + return -EOPNOTSUPP; +} + +static const struct ethtool_ops octeon_mgmt_ethtool_ops = { + .get_drvinfo = octeon_mgmt_get_drvinfo, + .get_settings = octeon_mgmt_get_settings, + .set_settings = octeon_mgmt_set_settings, + .nway_reset = octeon_mgmt_nway_reset, + .get_link = ethtool_op_get_link, +}; + +static const struct net_device_ops octeon_mgmt_ops = { + .ndo_open = octeon_mgmt_open, + .ndo_stop = octeon_mgmt_stop, + .ndo_start_xmit = octeon_mgmt_xmit, + .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, + .ndo_set_mac_address = octeon_mgmt_set_mac_address, + .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_change_mtu = octeon_mgmt_change_mtu, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = octeon_mgmt_poll_controller, +#endif +}; + +static int octeon_mgmt_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + struct octeon_mgmt *p; + const __be32 *data; + const u8 *mac; + struct resource *res_mix; + struct resource *res_agl; + struct resource *res_agl_prt_ctl; + int len; + int result; + + netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); + if (netdev == NULL) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + platform_set_drvdata(pdev, netdev); + p = netdev_priv(netdev); + netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, + OCTEON_MGMT_NAPI_WEIGHT); + + p->netdev = netdev; + p->dev = &pdev->dev; + p->has_rx_tstamp = false; + + data = of_get_property(pdev->dev.of_node, "cell-index", &len); + if (data && len == sizeof(*data)) { + p->port = be32_to_cpup(data); + } else { + dev_err(&pdev->dev, "no 'cell-index' property\n"); + result = -ENXIO; + goto err; + } + + snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); + + 
result = platform_get_irq(pdev, 0); + if (result < 0) + goto err; + + p->irq = result; + + res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res_mix == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_agl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3); + if (res_agl_prt_ctl == NULL) { + dev_err(&pdev->dev, "no 'reg' resource\n"); + result = -ENXIO; + goto err; + } + + p->mix_phys = res_mix->start; + p->mix_size = resource_size(res_mix); + p->agl_phys = res_agl->start; + p->agl_size = resource_size(res_agl); + p->agl_prt_ctl_phys = res_agl_prt_ctl->start; + p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); + + + if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, + res_mix->name)) { + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_mix->name); + result = -ENXIO; + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, + res_agl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl->name); + goto err; + } + + if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, + p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { + result = -ENXIO; + dev_err(&pdev->dev, "request_mem_region (%s) failed\n", + res_agl_prt_ctl->name); + goto err; + } + + p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); + p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); + p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, + p->agl_prt_ctl_size); + spin_lock_init(&p->lock); + + skb_queue_head_init(&p->tx_list); + skb_queue_head_init(&p->rx_list); + tasklet_init(&p->tx_clean_tasklet, + octeon_mgmt_clean_tx_tasklet, (unsigned long)p); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + netdev->netdev_ops = &octeon_mgmt_ops; + netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; + + mac = of_get_mac_address(pdev->dev.of_node); + + if (mac) + memcpy(netdev->dev_addr, mac, ETH_ALEN); + else + eth_hw_addr_random(netdev); + + p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); + + result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (result) + goto err; + + netif_carrier_off(netdev); + result = register_netdev(netdev); + if (result) + goto err; + + dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); + return 0; + +err: + free_netdev(netdev); + return result; +} + +static int octeon_mgmt_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + unregister_netdev(netdev); + free_netdev(netdev); + return 0; +} + +static const struct of_device_id octeon_mgmt_match[] = { + { + .compatible = "cavium,octeon-5750-mix", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, octeon_mgmt_match); + +static struct platform_driver octeon_mgmt_driver = { + .driver = { + .name = "octeon_mgmt", + .of_match_table = octeon_mgmt_match, + }, + .probe = octeon_mgmt_probe, + .remove = octeon_mgmt_remove, +}; + +extern void octeon_mdiobus_force_mod_depencency(void); + +static int __init octeon_mgmt_mod_init(void) +{ + /* Force our mdiobus driver module to be loaded first. 
*/ + octeon_mdiobus_force_mod_depencency(); + return platform_driver_register(&octeon_mgmt_driver); +} + +static void __exit octeon_mgmt_mod_exit(void) +{ + platform_driver_unregister(&octeon_mgmt_driver); +} + +module_init(octeon_mgmt_mod_init); +module_exit(octeon_mgmt_mod_exit); + +MODULE_DESCRIPTION(DRV_DESCRIPTION); +MODULE_AUTHOR("David Daney"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/net/ethernet/octeon/Kconfig b/drivers/net/ethernet/octeon/Kconfig deleted file mode 100644 index a7aa28054cc1..000000000000 --- a/drivers/net/ethernet/octeon/Kconfig +++ /dev/null @@ -1,14 +0,0 @@ -# -# Cavium network device configuration -# - -config OCTEON_MGMT_ETHERNET - tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)" - depends on CAVIUM_OCTEON_SOC - select PHYLIB - select MDIO_OCTEON - default y - ---help--- - This option enables the ethernet driver for the management - port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX, - CN54XX, CN52XX, and CN6XXX chips. diff --git a/drivers/net/ethernet/octeon/Makefile b/drivers/net/ethernet/octeon/Makefile deleted file mode 100644 index efa41c1d91c5..000000000000 --- a/drivers/net/ethernet/octeon/Makefile +++ /dev/null @@ -1,5 +0,0 @@ -# -# Makefile for the Cavium network device drivers. -# - -obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c deleted file mode 100644 index c177c7cec13b..000000000000 --- a/drivers/net/ethernet/octeon/octeon_mgmt.c +++ /dev/null @@ -1,1593 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2009-2012 Cavium, Inc - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define DRV_NAME "octeon_mgmt" -#define DRV_VERSION "2.0" -#define DRV_DESCRIPTION \ - "Cavium Networks Octeon MII (management) port Network Driver" - -#define OCTEON_MGMT_NAPI_WEIGHT 16 - -/* Ring sizes that are powers of two allow for more efficient modulo - * opertions. - */ -#define OCTEON_MGMT_RX_RING_SIZE 512 -#define OCTEON_MGMT_TX_RING_SIZE 128 - -/* Allow 8 bytes for vlan and FCS. 
*/ -#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) - -union mgmt_port_ring_entry { - u64 d64; - struct { -#define RING_ENTRY_CODE_DONE 0xf -#define RING_ENTRY_CODE_MORE 0x10 -#ifdef __BIG_ENDIAN_BITFIELD - u64 reserved_62_63:2; - /* Length of the buffer/packet in bytes */ - u64 len:14; - /* For TX, signals that the packet should be timestamped */ - u64 tstamp:1; - /* The RX error code */ - u64 code:7; - /* Physical address of the buffer */ - u64 addr:40; -#else - u64 addr:40; - u64 code:7; - u64 tstamp:1; - u64 len:14; - u64 reserved_62_63:2; -#endif - } s; -}; - -#define MIX_ORING1 0x0 -#define MIX_ORING2 0x8 -#define MIX_IRING1 0x10 -#define MIX_IRING2 0x18 -#define MIX_CTL 0x20 -#define MIX_IRHWM 0x28 -#define MIX_IRCNT 0x30 -#define MIX_ORHWM 0x38 -#define MIX_ORCNT 0x40 -#define MIX_ISR 0x48 -#define MIX_INTENA 0x50 -#define MIX_REMCNT 0x58 -#define MIX_BIST 0x78 - -#define AGL_GMX_PRT_CFG 0x10 -#define AGL_GMX_RX_FRM_CTL 0x18 -#define AGL_GMX_RX_FRM_MAX 0x30 -#define AGL_GMX_RX_JABBER 0x38 -#define AGL_GMX_RX_STATS_CTL 0x50 - -#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0 -#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8 -#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0 - -#define AGL_GMX_RX_ADR_CTL 0x100 -#define AGL_GMX_RX_ADR_CAM_EN 0x108 -#define AGL_GMX_RX_ADR_CAM0 0x180 -#define AGL_GMX_RX_ADR_CAM1 0x188 -#define AGL_GMX_RX_ADR_CAM2 0x190 -#define AGL_GMX_RX_ADR_CAM3 0x198 -#define AGL_GMX_RX_ADR_CAM4 0x1a0 -#define AGL_GMX_RX_ADR_CAM5 0x1a8 - -#define AGL_GMX_TX_CLK 0x208 -#define AGL_GMX_TX_STATS_CTL 0x268 -#define AGL_GMX_TX_CTL 0x270 -#define AGL_GMX_TX_STAT0 0x280 -#define AGL_GMX_TX_STAT1 0x288 -#define AGL_GMX_TX_STAT2 0x290 -#define AGL_GMX_TX_STAT3 0x298 -#define AGL_GMX_TX_STAT4 0x2a0 -#define AGL_GMX_TX_STAT5 0x2a8 -#define AGL_GMX_TX_STAT6 0x2b0 -#define AGL_GMX_TX_STAT7 0x2b8 -#define AGL_GMX_TX_STAT8 0x2c0 -#define AGL_GMX_TX_STAT9 0x2c8 - -struct octeon_mgmt { - struct net_device *netdev; - u64 mix; - u64 agl; - u64 agl_prt_ctl; - int port; - int irq; - bool has_rx_tstamp; - u64 *tx_ring; - dma_addr_t tx_ring_handle; - unsigned int tx_next; - unsigned int tx_next_clean; - unsigned int tx_current_fill; - /* The tx_list lock also protects the ring related variables */ - struct sk_buff_head tx_list; - - /* RX variables only touched in napi_poll. No locking necessary. */ - u64 *rx_ring; - dma_addr_t rx_ring_handle; - unsigned int rx_next; - unsigned int rx_next_fill; - unsigned int rx_current_fill; - struct sk_buff_head rx_list; - - spinlock_t lock; - unsigned int last_duplex; - unsigned int last_link; - unsigned int last_speed; - struct device *dev; - struct napi_struct napi; - struct tasklet_struct tx_clean_tasklet; - struct phy_device *phydev; - struct device_node *phy_np; - resource_size_t mix_phys; - resource_size_t mix_size; - resource_size_t agl_phys; - resource_size_t agl_size; - resource_size_t agl_prt_ctl_phys; - resource_size_t agl_prt_ctl_size; -}; - -static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) -{ - union cvmx_mixx_intena mix_intena; - unsigned long flags; - - spin_lock_irqsave(&p->lock, flags); - mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); - mix_intena.s.ithena = enable ? 
1 : 0; - cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); - spin_unlock_irqrestore(&p->lock, flags); -} - -static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) -{ - union cvmx_mixx_intena mix_intena; - unsigned long flags; - - spin_lock_irqsave(&p->lock, flags); - mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); - mix_intena.s.othena = enable ? 1 : 0; - cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); - spin_unlock_irqrestore(&p->lock, flags); -} - -static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) -{ - octeon_mgmt_set_rx_irq(p, 1); -} - -static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) -{ - octeon_mgmt_set_rx_irq(p, 0); -} - -static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) -{ - octeon_mgmt_set_tx_irq(p, 1); -} - -static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) -{ - octeon_mgmt_set_tx_irq(p, 0); -} - -static unsigned int ring_max_fill(unsigned int ring_size) -{ - return ring_size - 8; -} - -static unsigned int ring_size_to_bytes(unsigned int ring_size) -{ - return ring_size * sizeof(union mgmt_port_ring_entry); -} - -static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { - unsigned int size; - union mgmt_port_ring_entry re; - struct sk_buff *skb; - - /* CN56XX pass 1 needs 8 bytes of padding. */ - size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; - - skb = netdev_alloc_skb(netdev, size); - if (!skb) - break; - skb_reserve(skb, NET_IP_ALIGN); - __skb_queue_tail(&p->rx_list, skb); - - re.d64 = 0; - re.s.len = size; - re.s.addr = dma_map_single(p->dev, skb->data, - size, - DMA_FROM_DEVICE); - - /* Put it in the ring. */ - p->rx_ring[p->rx_next_fill] = re.d64; - dma_sync_single_for_device(p->dev, p->rx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), - DMA_BIDIRECTIONAL); - p->rx_next_fill = - (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; - p->rx_current_fill++; - /* Ring the bell. */ - cvmx_write_csr(p->mix + MIX_IRING2, 1); - } -} - -static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) -{ - union cvmx_mixx_orcnt mix_orcnt; - union mgmt_port_ring_entry re; - struct sk_buff *skb; - int cleaned = 0; - unsigned long flags; - - mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); - while (mix_orcnt.s.orcnt) { - spin_lock_irqsave(&p->tx_list.lock, flags); - - mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); - - if (mix_orcnt.s.orcnt == 0) { - spin_unlock_irqrestore(&p->tx_list.lock, flags); - break; - } - - dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), - DMA_BIDIRECTIONAL); - - re.d64 = p->tx_ring[p->tx_next_clean]; - p->tx_next_clean = - (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; - skb = __skb_dequeue(&p->tx_list); - - mix_orcnt.u64 = 0; - mix_orcnt.s.orcnt = 1; - - /* Acknowledge to hardware that we have the buffer. 
*/ - cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); - p->tx_current_fill--; - - spin_unlock_irqrestore(&p->tx_list.lock, flags); - - dma_unmap_single(p->dev, re.s.addr, re.s.len, - DMA_TO_DEVICE); - - /* Read the hardware TX timestamp if one was recorded */ - if (unlikely(re.s.tstamp)) { - struct skb_shared_hwtstamps ts; - u64 ns; - - memset(&ts, 0, sizeof(ts)); - /* Read the timestamp */ - ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); - /* Remove the timestamp from the FIFO */ - cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); - /* Tell the kernel about the timestamp */ - ts.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(skb, &ts); - } - - dev_kfree_skb_any(skb); - cleaned++; - - mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); - } - - if (cleaned && netif_queue_stopped(p->netdev)) - netif_wake_queue(p->netdev); -} - -static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) -{ - struct octeon_mgmt *p = (struct octeon_mgmt *)arg; - octeon_mgmt_clean_tx_buffers(p); - octeon_mgmt_enable_tx_irq(p); -} - -static void octeon_mgmt_update_rx_stats(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - unsigned long flags; - u64 drop, bad; - - /* These reads also clear the count registers. */ - drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); - bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); - - if (drop || bad) { - /* Do an atomic update. */ - spin_lock_irqsave(&p->lock, flags); - netdev->stats.rx_errors += bad; - netdev->stats.rx_dropped += drop; - spin_unlock_irqrestore(&p->lock, flags); - } -} - -static void octeon_mgmt_update_tx_stats(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - unsigned long flags; - - union cvmx_agl_gmx_txx_stat0 s0; - union cvmx_agl_gmx_txx_stat1 s1; - - /* These reads also clear the count registers. */ - s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); - s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); - - if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { - /* Do an atomic update. */ - spin_lock_irqsave(&p->lock, flags); - netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; - netdev->stats.collisions += s1.s.scol + s1.s.mcol; - spin_unlock_irqrestore(&p->lock, flags); - } -} - -/* - * Dequeue a receive skb and its corresponding ring entry. The ring - * entry is returned, *pskb is updated to point to the skb. - */ -static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, - struct sk_buff **pskb) -{ - union mgmt_port_ring_entry re; - - dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), - DMA_BIDIRECTIONAL); - - re.d64 = p->rx_ring[p->rx_next]; - p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; - p->rx_current_fill--; - *pskb = __skb_dequeue(&p->rx_list); - - dma_unmap_single(p->dev, re.s.addr, - ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, - DMA_FROM_DEVICE); - - return re.d64; -} - - -static int octeon_mgmt_receive_one(struct octeon_mgmt *p) -{ - struct net_device *netdev = p->netdev; - union cvmx_mixx_ircnt mix_ircnt; - union mgmt_port_ring_entry re; - struct sk_buff *skb; - struct sk_buff *skb2; - struct sk_buff *skb_new; - union mgmt_port_ring_entry re2; - int rc = 1; - - - re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); - if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { - /* A good packet, send it up. 
*/ - skb_put(skb, re.s.len); -good: - /* Process the RX timestamp if it was recorded */ - if (p->has_rx_tstamp) { - /* The first 8 bytes are the timestamp */ - u64 ns = *(u64 *)skb->data; - struct skb_shared_hwtstamps *ts; - ts = skb_hwtstamps(skb); - ts->hwtstamp = ns_to_ktime(ns); - __skb_pull(skb, 8); - } - skb->protocol = eth_type_trans(skb, netdev); - netdev->stats.rx_packets++; - netdev->stats.rx_bytes += skb->len; - netif_receive_skb(skb); - rc = 0; - } else if (re.s.code == RING_ENTRY_CODE_MORE) { - /* Packet split across skbs. This can happen if we - * increase the MTU. Buffers that are already in the - * rx ring can then end up being too small. As the rx - * ring is refilled, buffers sized for the new MTU - * will be used and we should go back to the normal - * non-split case. - */ - skb_put(skb, re.s.len); - do { - re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); - if (re2.s.code != RING_ENTRY_CODE_MORE - && re2.s.code != RING_ENTRY_CODE_DONE) - goto split_error; - skb_put(skb2, re2.s.len); - skb_new = skb_copy_expand(skb, 0, skb2->len, - GFP_ATOMIC); - if (!skb_new) - goto split_error; - if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), - skb2->len)) - goto split_error; - skb_put(skb_new, skb2->len); - dev_kfree_skb_any(skb); - dev_kfree_skb_any(skb2); - skb = skb_new; - } while (re2.s.code == RING_ENTRY_CODE_MORE); - goto good; - } else { - /* Some other error, discard it. */ - dev_kfree_skb_any(skb); - /* Error statistics are accumulated in - * octeon_mgmt_update_rx_stats. - */ - } - goto done; -split_error: - /* Discard the whole mess. */ - dev_kfree_skb_any(skb); - dev_kfree_skb_any(skb2); - while (re2.s.code == RING_ENTRY_CODE_MORE) { - re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); - dev_kfree_skb_any(skb2); - } - netdev->stats.rx_errors++; - -done: - /* Tell the hardware we processed a packet. */ - mix_ircnt.u64 = 0; - mix_ircnt.s.ircnt = 1; - cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); - return rc; -} - -static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) -{ - unsigned int work_done = 0; - union cvmx_mixx_ircnt mix_ircnt; - int rc; - - mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); - while (work_done < budget && mix_ircnt.s.ircnt) { - - rc = octeon_mgmt_receive_one(p); - if (!rc) - work_done++; - - /* Check for more packets. */ - mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); - } - - octeon_mgmt_rx_fill_ring(p->netdev); - - return work_done; -} - -static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) -{ - struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); - struct net_device *netdev = p->netdev; - unsigned int work_done = 0; - - work_done = octeon_mgmt_receive_packets(p, budget); - - if (work_done < budget) { - /* We stopped because no more packets were available. */ - napi_complete(napi); - octeon_mgmt_enable_rx_irq(p); - } - octeon_mgmt_update_rx_stats(netdev); - - return work_done; -} - -/* Reset the hardware to clean state. 
*/ -static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) -{ - union cvmx_mixx_ctl mix_ctl; - union cvmx_mixx_bist mix_bist; - union cvmx_agl_gmx_bist agl_gmx_bist; - - mix_ctl.u64 = 0; - cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); - do { - mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); - } while (mix_ctl.s.busy); - mix_ctl.s.reset = 1; - cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); - cvmx_read_csr(p->mix + MIX_CTL); - octeon_io_clk_delay(64); - - mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); - if (mix_bist.u64) - dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", - (unsigned long long)mix_bist.u64); - - agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); - if (agl_gmx_bist.u64) - dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", - (unsigned long long)agl_gmx_bist.u64); -} - -struct octeon_mgmt_cam_state { - u64 cam[6]; - u64 cam_mask; - int cam_index; -}; - -static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, - unsigned char *addr) -{ - int i; - - for (i = 0; i < 6; i++) - cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); - cs->cam_mask |= (1ULL << cs->cam_index); - cs->cam_index++; -} - -static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; - union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; - unsigned long flags; - unsigned int prev_packet_enable; - unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ - unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ - struct octeon_mgmt_cam_state cam_state; - struct netdev_hw_addr *ha; - int available_cam_entries; - - memset(&cam_state, 0, sizeof(cam_state)); - - if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { - cam_mode = 0; - available_cam_entries = 8; - } else { - /* One CAM entry for the primary address, leaves seven - * for the secondary addresses. - */ - available_cam_entries = 7 - netdev->uc.count; - } - - if (netdev->flags & IFF_MULTICAST) { - if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || - netdev_mc_count(netdev) > available_cam_entries) - multicast_mode = 2; /* 2 - Accept all multicast. */ - else - multicast_mode = 0; /* 0 - Use CAM. */ - } - - if (cam_mode == 1) { - /* Add primary address. */ - octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); - netdev_for_each_uc_addr(ha, netdev) - octeon_mgmt_cam_state_add(&cam_state, ha->addr); - } - if (multicast_mode == 0) { - netdev_for_each_mc_addr(ha, netdev) - octeon_mgmt_cam_state_add(&cam_state, ha->addr); - } - - spin_lock_irqsave(&p->lock, flags); - - /* Disable packet I/O. */ - agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - prev_packet_enable = agl_gmx_prtx.s.en; - agl_gmx_prtx.s.en = 0; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); - - adr_ctl.u64 = 0; - adr_ctl.s.cam_mode = cam_mode; - adr_ctl.s.mcst = multicast_mode; - adr_ctl.s.bcst = 1; /* Allow broadcast */ - - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); - - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); - cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); - - /* Restore packet I/O. 
*/ - agl_gmx_prtx.s.en = prev_packet_enable; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); - - spin_unlock_irqrestore(&p->lock, flags); -} - -static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) -{ - int r = eth_mac_addr(netdev, addr); - - if (r) - return r; - - octeon_mgmt_set_rx_filtering(netdev); - - return 0; -} - -static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; - - /* Limit the MTU to make sure the ethernet packets are between - * 64 bytes and 16383 bytes. - */ - if (size_without_fcs < 64 || size_without_fcs > 16383) { - dev_warn(p->dev, "MTU must be between %d and %d.\n", - 64 - OCTEON_MGMT_RX_HEADROOM, - 16383 - OCTEON_MGMT_RX_HEADROOM); - return -EINVAL; - } - - netdev->mtu = new_mtu; - - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); - cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, - (size_without_fcs + 7) & 0xfff8); - - return 0; -} - -static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) -{ - struct net_device *netdev = dev_id; - struct octeon_mgmt *p = netdev_priv(netdev); - union cvmx_mixx_isr mixx_isr; - - mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); - - /* Clear any pending interrupts */ - cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); - cvmx_read_csr(p->mix + MIX_ISR); - - if (mixx_isr.s.irthresh) { - octeon_mgmt_disable_rx_irq(p); - napi_schedule(&p->napi); - } - if (mixx_isr.s.orthresh) { - octeon_mgmt_disable_tx_irq(p); - tasklet_schedule(&p->tx_clean_tasklet); - } - - return IRQ_HANDLED; -} - -static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, - struct ifreq *rq, int cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - struct hwtstamp_config config; - union cvmx_mio_ptp_clock_cfg ptp; - union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; - bool have_hw_timestamps = false; - - if (copy_from_user(&config, rq->ifr_data, sizeof(config))) - return -EFAULT; - - if (config.flags) /* reserved for future extensions */ - return -EINVAL; - - /* Check the status of hardware for tiemstamps */ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - /* Get the current state of the PTP clock */ - ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); - if (!ptp.s.ext_clk_en) { - /* The clock has not been configured to use an - * external source. Program it to use the main clock - * reference. 
- */ - u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); - if (!ptp.s.ptp_en) - cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); - pr_info("PTP Clock: Using sclk reference at %lld Hz\n", - (NSEC_PER_SEC << 32) / clock_comp); - } else { - /* The clock is already programmed to use a GPIO */ - u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); - pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", - ptp.s.ext_clk_in, - (NSEC_PER_SEC << 32) / clock_comp); - } - - /* Enable the clock if it wasn't done already */ - if (!ptp.s.ptp_en) { - ptp.s.ptp_en = 1; - cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); - } - have_hw_timestamps = true; - } - - if (!have_hw_timestamps) - return -EINVAL; - - switch (config.tx_type) { - case HWTSTAMP_TX_OFF: - case HWTSTAMP_TX_ON: - break; - default: - return -ERANGE; - } - - switch (config.rx_filter) { - case HWTSTAMP_FILTER_NONE: - p->has_rx_tstamp = false; - rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); - rxx_frm_ctl.s.ptp_mode = 0; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); - break; - case HWTSTAMP_FILTER_ALL: - case HWTSTAMP_FILTER_SOME: - case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: - case HWTSTAMP_FILTER_PTP_V2_EVENT: - case HWTSTAMP_FILTER_PTP_V2_SYNC: - case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - p->has_rx_tstamp = have_hw_timestamps; - config.rx_filter = HWTSTAMP_FILTER_ALL; - if (p->has_rx_tstamp) { - rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); - rxx_frm_ctl.s.ptp_mode = 1; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); - } - break; - default: - return -ERANGE; - } - - if (copy_to_user(rq->ifr_data, &config, sizeof(config))) - return -EFAULT; - - return 0; -} - -static int octeon_mgmt_ioctl(struct net_device *netdev, - struct ifreq *rq, int cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - switch (cmd) { - case SIOCSHWTSTAMP: - return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); - default: - if (p->phydev) - return phy_mii_ioctl(p->phydev, rq, cmd); - return -EINVAL; - } -} - -static void octeon_mgmt_disable_link(struct octeon_mgmt *p) -{ - union cvmx_agl_gmx_prtx_cfg prtx_cfg; - - /* Disable GMX before we make any changes. 
*/ - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - prtx_cfg.s.en = 0; - prtx_cfg.s.tx_en = 0; - prtx_cfg.s.rx_en = 0; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); - - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - int i; - for (i = 0; i < 10; i++) { - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) - break; - mdelay(1); - i++; - } - } -} - -static void octeon_mgmt_enable_link(struct octeon_mgmt *p) -{ - union cvmx_agl_gmx_prtx_cfg prtx_cfg; - - /* Restore the GMX enable state only if link is set */ - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - prtx_cfg.s.tx_en = 1; - prtx_cfg.s.rx_en = 1; - prtx_cfg.s.en = 1; - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); -} - -static void octeon_mgmt_update_link(struct octeon_mgmt *p) -{ - union cvmx_agl_gmx_prtx_cfg prtx_cfg; - - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - - if (!p->phydev->link) - prtx_cfg.s.duplex = 1; - else - prtx_cfg.s.duplex = p->phydev->duplex; - - switch (p->phydev->speed) { - case 10: - prtx_cfg.s.speed = 0; - prtx_cfg.s.slottime = 0; - - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - prtx_cfg.s.burst = 1; - prtx_cfg.s.speed_msb = 1; - } - break; - case 100: - prtx_cfg.s.speed = 0; - prtx_cfg.s.slottime = 0; - - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - prtx_cfg.s.burst = 1; - prtx_cfg.s.speed_msb = 0; - } - break; - case 1000: - /* 1000 MBits is only supported on 6XXX chips */ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - prtx_cfg.s.speed = 1; - prtx_cfg.s.speed_msb = 0; - /* Only matters for half-duplex */ - prtx_cfg.s.slottime = 1; - prtx_cfg.s.burst = p->phydev->duplex; - } - break; - case 0: /* No link */ - default: - break; - } - - /* Write the new GMX setting with the port still disabled. */ - cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); - - /* Read GMX CFG again to make sure the config is completed. */ - prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); - - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { - union cvmx_agl_gmx_txx_clk agl_clk; - union cvmx_agl_prtx_ctl prtx_ctl; - - prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); - agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); - /* MII (both speeds) and RGMII 1000 speed. */ - agl_clk.s.clk_cnt = 1; - if (prtx_ctl.s.mode == 0) { /* RGMII mode */ - if (p->phydev->speed == 10) - agl_clk.s.clk_cnt = 50; - else if (p->phydev->speed == 100) - agl_clk.s.clk_cnt = 5; - } - cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); - } -} - -static void octeon_mgmt_adjust_link(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - unsigned long flags; - int link_changed = 0; - - if (!p->phydev) - return; - - spin_lock_irqsave(&p->lock, flags); - - - if (!p->phydev->link && p->last_link) - link_changed = -1; - - if (p->phydev->link - && (p->last_duplex != p->phydev->duplex - || p->last_link != p->phydev->link - || p->last_speed != p->phydev->speed)) { - octeon_mgmt_disable_link(p); - link_changed = 1; - octeon_mgmt_update_link(p); - octeon_mgmt_enable_link(p); - } - - p->last_link = p->phydev->link; - p->last_speed = p->phydev->speed; - p->last_duplex = p->phydev->duplex; - - spin_unlock_irqrestore(&p->lock, flags); - - if (link_changed != 0) { - if (link_changed > 0) { - pr_info("%s: Link is up - %d/%s\n", netdev->name, - p->phydev->speed, - DUPLEX_FULL == p->phydev->duplex ? 
- "Full" : "Half"); - } else { - pr_info("%s: Link is down\n", netdev->name); - } - } -} - -static int octeon_mgmt_init_phy(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (octeon_is_simulation() || p->phy_np == NULL) { - /* No PHYs in the simulator. */ - netif_carrier_on(netdev); - return 0; - } - - p->phydev = of_phy_connect(netdev, p->phy_np, - octeon_mgmt_adjust_link, 0, - PHY_INTERFACE_MODE_MII); - - if (!p->phydev) - return -ENODEV; - - return 0; -} - -static int octeon_mgmt_open(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - union cvmx_mixx_ctl mix_ctl; - union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; - union cvmx_mixx_oring1 oring1; - union cvmx_mixx_iring1 iring1; - union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; - union cvmx_mixx_irhwm mix_irhwm; - union cvmx_mixx_orhwm mix_orhwm; - union cvmx_mixx_intena mix_intena; - struct sockaddr sa; - - /* Allocate ring buffers. */ - p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), - GFP_KERNEL); - if (!p->tx_ring) - return -ENOMEM; - p->tx_ring_handle = - dma_map_single(p->dev, p->tx_ring, - ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), - DMA_BIDIRECTIONAL); - p->tx_next = 0; - p->tx_next_clean = 0; - p->tx_current_fill = 0; - - - p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), - GFP_KERNEL); - if (!p->rx_ring) - goto err_nomem; - p->rx_ring_handle = - dma_map_single(p->dev, p->rx_ring, - ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), - DMA_BIDIRECTIONAL); - - p->rx_next = 0; - p->rx_next_fill = 0; - p->rx_current_fill = 0; - - octeon_mgmt_reset_hw(p); - - mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); - - /* Bring it out of reset if needed. */ - if (mix_ctl.s.reset) { - mix_ctl.s.reset = 0; - cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); - do { - mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); - } while (mix_ctl.s.reset); - } - - if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { - agl_gmx_inf_mode.u64 = 0; - agl_gmx_inf_mode.s.en = 1; - cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); - } - if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) - || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { - /* Force compensation values, as they are not - * determined properly by HW - */ - union cvmx_agl_gmx_drv_ctl drv_ctl; - - drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); - if (p->port) { - drv_ctl.s.byp_en1 = 1; - drv_ctl.s.nctl1 = 6; - drv_ctl.s.pctl1 = 6; - } else { - drv_ctl.s.byp_en = 1; - drv_ctl.s.nctl = 6; - drv_ctl.s.pctl = 6; - } - cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); - } - - oring1.u64 = 0; - oring1.s.obase = p->tx_ring_handle >> 3; - oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; - cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); - - iring1.u64 = 0; - iring1.s.ibase = p->rx_ring_handle >> 3; - iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; - cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); - - memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); - octeon_mgmt_set_mac_address(netdev, &sa); - - octeon_mgmt_change_mtu(netdev, netdev->mtu); - - /* Enable the port HW. Packets are not allowed until - * cvmx_mgmt_port_enable() is called. - */ - mix_ctl.u64 = 0; - mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ - mix_ctl.s.en = 1; /* Enable the port */ - mix_ctl.s.nbtarb = 0; /* Arbitration mode */ - /* MII CB-request FIFO programmable high watermark */ - mix_ctl.s.mrq_hwm = 1; -#ifdef __LITTLE_ENDIAN - mix_ctl.s.lendian = 1; -#endif - cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); - - /* Read the PHY to find the mode of the interface. 
*/ - if (octeon_mgmt_init_phy(netdev)) { - dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); - goto err_noirq; - } - - /* Set the mode of the interface, RGMII/MII. */ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { - union cvmx_agl_prtx_ctl agl_prtx_ctl; - int rgmii_mode = (p->phydev->supported & - (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; - - agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); - agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; - cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); - - /* MII clocks counts are based on the 125Mhz - * reference, which has an 8nS period. So our delays - * need to be multiplied by this factor. - */ -#define NS_PER_PHY_CLK 8 - - /* Take the DLL and clock tree out of reset */ - agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); - agl_prtx_ctl.s.clkrst = 0; - if (rgmii_mode) { - agl_prtx_ctl.s.dllrst = 0; - agl_prtx_ctl.s.clktx_byp = 0; - } - cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); - cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ - - /* Wait for the DLL to lock. External 125 MHz - * reference clock must be stable at this point. - */ - ndelay(256 * NS_PER_PHY_CLK); - - /* Enable the interface */ - agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); - agl_prtx_ctl.s.enable = 1; - cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); - - /* Read the value back to force the previous write */ - agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); - - /* Enable the compensation controller */ - agl_prtx_ctl.s.comp = 1; - agl_prtx_ctl.s.drv_byp = 0; - cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); - /* Force write out before wait. */ - cvmx_read_csr(p->agl_prt_ctl); - - /* For compensation state to lock. */ - ndelay(1040 * NS_PER_PHY_CLK); - - /* Default Interframe Gaps are too small. Recommended - * workaround is. - * - * AGL_GMX_TX_IFG[IFG1]=14 - * AGL_GMX_TX_IFG[IFG2]=10 - */ - cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); - } - - octeon_mgmt_rx_fill_ring(netdev); - - /* Clear statistics. */ - /* Clear on read. */ - cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); - cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); - cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); - - cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); - cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); - cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); - - /* Clear any pending interrupts */ - cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); - - if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, - netdev)) { - dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); - goto err_noirq; - } - - /* Interrupt every single RX packet */ - mix_irhwm.u64 = 0; - mix_irhwm.s.irhwm = 0; - cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); - - /* Interrupt when we have 1 or more packets to clean. */ - mix_orhwm.u64 = 0; - mix_orhwm.s.orhwm = 0; - cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); - - /* Enable receive and transmit interrupts */ - mix_intena.u64 = 0; - mix_intena.s.ithena = 1; - mix_intena.s.othena = 1; - cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); - - /* Enable packet I/O. */ - - rxx_frm_ctl.u64 = 0; - rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; - rxx_frm_ctl.s.pre_align = 1; - /* When set, disables the length check for non-min sized pkts - * with padding in the client data. 
- */ - rxx_frm_ctl.s.pad_len = 1; - /* When set, disables the length check for VLAN pkts */ - rxx_frm_ctl.s.vlan_len = 1; - /* When set, PREAMBLE checking is less strict */ - rxx_frm_ctl.s.pre_free = 1; - /* Control Pause Frames can match station SMAC */ - rxx_frm_ctl.s.ctl_smac = 0; - /* Control Pause Frames can match globally assign Multicast address */ - rxx_frm_ctl.s.ctl_mcst = 1; - /* Forward pause information to TX block */ - rxx_frm_ctl.s.ctl_bck = 1; - /* Drop Control Pause Frames */ - rxx_frm_ctl.s.ctl_drp = 1; - /* Strip off the preamble */ - rxx_frm_ctl.s.pre_strp = 1; - /* This port is configured to send PREAMBLE+SFD to begin every - * frame. GMX checks that the PREAMBLE is sent correctly. - */ - rxx_frm_ctl.s.pre_chk = 1; - cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); - - /* Configure the port duplex, speed and enables */ - octeon_mgmt_disable_link(p); - if (p->phydev) - octeon_mgmt_update_link(p); - octeon_mgmt_enable_link(p); - - p->last_link = 0; - p->last_speed = 0; - /* PHY is not present in simulator. The carrier is enabled - * while initializing the phy for simulator, leave it enabled. - */ - if (p->phydev) { - netif_carrier_off(netdev); - phy_start_aneg(p->phydev); - } - - netif_wake_queue(netdev); - napi_enable(&p->napi); - - return 0; -err_noirq: - octeon_mgmt_reset_hw(p); - dma_unmap_single(p->dev, p->rx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), - DMA_BIDIRECTIONAL); - kfree(p->rx_ring); -err_nomem: - dma_unmap_single(p->dev, p->tx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), - DMA_BIDIRECTIONAL); - kfree(p->tx_ring); - return -ENOMEM; -} - -static int octeon_mgmt_stop(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - napi_disable(&p->napi); - netif_stop_queue(netdev); - - if (p->phydev) - phy_disconnect(p->phydev); - p->phydev = NULL; - - netif_carrier_off(netdev); - - octeon_mgmt_reset_hw(p); - - free_irq(p->irq, netdev); - - /* dma_unmap is a nop on Octeon, so just free everything. */ - skb_queue_purge(&p->tx_list); - skb_queue_purge(&p->rx_list); - - dma_unmap_single(p->dev, p->rx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), - DMA_BIDIRECTIONAL); - kfree(p->rx_ring); - - dma_unmap_single(p->dev, p->tx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), - DMA_BIDIRECTIONAL); - kfree(p->tx_ring); - - return 0; -} - -static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - union mgmt_port_ring_entry re; - unsigned long flags; - int rv = NETDEV_TX_BUSY; - - re.d64 = 0; - re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); - re.s.len = skb->len; - re.s.addr = dma_map_single(p->dev, skb->data, - skb->len, - DMA_TO_DEVICE); - - spin_lock_irqsave(&p->tx_list.lock, flags); - - if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { - spin_unlock_irqrestore(&p->tx_list.lock, flags); - netif_stop_queue(netdev); - spin_lock_irqsave(&p->tx_list.lock, flags); - } - - if (unlikely(p->tx_current_fill >= - ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { - spin_unlock_irqrestore(&p->tx_list.lock, flags); - dma_unmap_single(p->dev, re.s.addr, re.s.len, - DMA_TO_DEVICE); - goto out; - } - - __skb_queue_tail(&p->tx_list, skb); - - /* Put it in the ring. 
*/ - p->tx_ring[p->tx_next] = re.d64; - p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; - p->tx_current_fill++; - - spin_unlock_irqrestore(&p->tx_list.lock, flags); - - dma_sync_single_for_device(p->dev, p->tx_ring_handle, - ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), - DMA_BIDIRECTIONAL); - - netdev->stats.tx_packets++; - netdev->stats.tx_bytes += skb->len; - - /* Ring the bell. */ - cvmx_write_csr(p->mix + MIX_ORING2, 1); - - netdev->trans_start = jiffies; - rv = NETDEV_TX_OK; -out: - octeon_mgmt_update_tx_stats(netdev); - return rv; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void octeon_mgmt_poll_controller(struct net_device *netdev) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - octeon_mgmt_receive_packets(p, 16); - octeon_mgmt_update_rx_stats(netdev); -} -#endif - -static void octeon_mgmt_get_drvinfo(struct net_device *netdev, - struct ethtool_drvinfo *info) -{ - strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); - strlcpy(info->version, DRV_VERSION, sizeof(info->version)); - strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); - strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); -} - -static int octeon_mgmt_get_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (p->phydev) - return phy_ethtool_gset(p->phydev, cmd); - - return -EOPNOTSUPP; -} - -static int octeon_mgmt_set_settings(struct net_device *netdev, - struct ethtool_cmd *cmd) -{ - struct octeon_mgmt *p = netdev_priv(netdev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (p->phydev) - return phy_ethtool_sset(p->phydev, cmd); - - return -EOPNOTSUPP; -} - -static int octeon_mgmt_nway_reset(struct net_device *dev) -{ - struct octeon_mgmt *p = netdev_priv(dev); - - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - - if (p->phydev) - return phy_start_aneg(p->phydev); - - return -EOPNOTSUPP; -} - -static const struct ethtool_ops octeon_mgmt_ethtool_ops = { - .get_drvinfo = octeon_mgmt_get_drvinfo, - .get_settings = octeon_mgmt_get_settings, - .set_settings = octeon_mgmt_set_settings, - .nway_reset = octeon_mgmt_nway_reset, - .get_link = ethtool_op_get_link, -}; - -static const struct net_device_ops octeon_mgmt_ops = { - .ndo_open = octeon_mgmt_open, - .ndo_stop = octeon_mgmt_stop, - .ndo_start_xmit = octeon_mgmt_xmit, - .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, - .ndo_set_mac_address = octeon_mgmt_set_mac_address, - .ndo_do_ioctl = octeon_mgmt_ioctl, - .ndo_change_mtu = octeon_mgmt_change_mtu, -#ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = octeon_mgmt_poll_controller, -#endif -}; - -static int octeon_mgmt_probe(struct platform_device *pdev) -{ - struct net_device *netdev; - struct octeon_mgmt *p; - const __be32 *data; - const u8 *mac; - struct resource *res_mix; - struct resource *res_agl; - struct resource *res_agl_prt_ctl; - int len; - int result; - - netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); - if (netdev == NULL) - return -ENOMEM; - - SET_NETDEV_DEV(netdev, &pdev->dev); - - platform_set_drvdata(pdev, netdev); - p = netdev_priv(netdev); - netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, - OCTEON_MGMT_NAPI_WEIGHT); - - p->netdev = netdev; - p->dev = &pdev->dev; - p->has_rx_tstamp = false; - - data = of_get_property(pdev->dev.of_node, "cell-index", &len); - if (data && len == sizeof(*data)) { - p->port = be32_to_cpup(data); - } else { - dev_err(&pdev->dev, "no 'cell-index' property\n"); - result = -ENXIO; - goto err; - } - - snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); - - 
result = platform_get_irq(pdev, 0); - if (result < 0) - goto err; - - p->irq = result; - - res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res_mix == NULL) { - dev_err(&pdev->dev, "no 'reg' resource\n"); - result = -ENXIO; - goto err; - } - - res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res_agl == NULL) { - dev_err(&pdev->dev, "no 'reg' resource\n"); - result = -ENXIO; - goto err; - } - - res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3); - if (res_agl_prt_ctl == NULL) { - dev_err(&pdev->dev, "no 'reg' resource\n"); - result = -ENXIO; - goto err; - } - - p->mix_phys = res_mix->start; - p->mix_size = resource_size(res_mix); - p->agl_phys = res_agl->start; - p->agl_size = resource_size(res_agl); - p->agl_prt_ctl_phys = res_agl_prt_ctl->start; - p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); - - - if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, - res_mix->name)) { - dev_err(&pdev->dev, "request_mem_region (%s) failed\n", - res_mix->name); - result = -ENXIO; - goto err; - } - - if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, - res_agl->name)) { - result = -ENXIO; - dev_err(&pdev->dev, "request_mem_region (%s) failed\n", - res_agl->name); - goto err; - } - - if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, - p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { - result = -ENXIO; - dev_err(&pdev->dev, "request_mem_region (%s) failed\n", - res_agl_prt_ctl->name); - goto err; - } - - p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); - p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); - p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, - p->agl_prt_ctl_size); - spin_lock_init(&p->lock); - - skb_queue_head_init(&p->tx_list); - skb_queue_head_init(&p->rx_list); - tasklet_init(&p->tx_clean_tasklet, - octeon_mgmt_clean_tx_tasklet, (unsigned long)p); - - netdev->priv_flags |= IFF_UNICAST_FLT; - - netdev->netdev_ops = &octeon_mgmt_ops; - netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; - - mac = of_get_mac_address(pdev->dev.of_node); - - if (mac) - memcpy(netdev->dev_addr, mac, ETH_ALEN); - else - eth_hw_addr_random(netdev); - - p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); - - result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); - if (result) - goto err; - - netif_carrier_off(netdev); - result = register_netdev(netdev); - if (result) - goto err; - - dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); - return 0; - -err: - free_netdev(netdev); - return result; -} - -static int octeon_mgmt_remove(struct platform_device *pdev) -{ - struct net_device *netdev = platform_get_drvdata(pdev); - - unregister_netdev(netdev); - free_netdev(netdev); - return 0; -} - -static const struct of_device_id octeon_mgmt_match[] = { - { - .compatible = "cavium,octeon-5750-mix", - }, - {}, -}; -MODULE_DEVICE_TABLE(of, octeon_mgmt_match); - -static struct platform_driver octeon_mgmt_driver = { - .driver = { - .name = "octeon_mgmt", - .of_match_table = octeon_mgmt_match, - }, - .probe = octeon_mgmt_probe, - .remove = octeon_mgmt_remove, -}; - -extern void octeon_mdiobus_force_mod_depencency(void); - -static int __init octeon_mgmt_mod_init(void) -{ - /* Force our mdiobus driver module to be loaded first. 
*/ - octeon_mdiobus_force_mod_depencency(); - return platform_driver_register(&octeon_mgmt_driver); -} - -static void __exit octeon_mgmt_mod_exit(void) -{ - platform_driver_unregister(&octeon_mgmt_driver); -} - -module_init(octeon_mgmt_mod_init); -module_exit(octeon_mgmt_mod_exit); - -MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR("David Daney"); -MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_VERSION); -- cgit v1.2.3 From 977bc20cf66367a530d17696f7e1783510a115b7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Mar 2016 10:18:49 +0300 Subject: mediatek: checking for IS_ERR() instead of NULL of_phy_connect() returns NULL on error, it never returns error pointers. Fixes: 656e705243fd ('net-next: mediatek: add support for MT7623 ethernet') Signed-off-by: Dan Carpenter Acked-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 1e6c5498bba9..fd19d2d163b8 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -186,9 +186,9 @@ static int mtk_phy_connect_node(struct mtk_eth *eth, struct mtk_mac *mac, phydev = of_phy_connect(eth->netdev[mac->id], phy_node, mtk_phy_link_adjust, 0, phy_mode); - if (IS_ERR(phydev)) { + if (!phydev) { dev_err(eth->dev, "could not connect to PHY\n"); - return PTR_ERR(phydev); + return -ENODEV; } dev_info(eth->dev, -- cgit v1.2.3 From 48e77422a3cb74e04384090e21235f4916278373 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 15 Mar 2016 10:19:04 +0300 Subject: mediatek: unlock on error in mtk_tx_map() There was a missing unlock on the error path. Fixes: 656e705243fd ('net-next: mediatek: add support for MT7623 ethernet') Signed-off-by: Dan Carpenter Acked-by: John Crispin Signed-off-by: David S. Miller --- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index fd19d2d163b8..7f2126b6a179 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -661,6 +661,8 @@ err_dma: itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); } while (itxd != txd); + spin_unlock_irqrestore(ð->page_lock, flags); + return -ENOMEM; } -- cgit v1.2.3 From cc2fa619a738a052eb90ccbbbc48947a2e2aa1d3 Mon Sep 17 00:00:00 2001 From: Phil Reid Date: Tue, 15 Mar 2016 15:34:33 +0800 Subject: net: stmmac: Don't search for phys if mdio node is defined. If a dt mdio entry has been added least assume that we wont search for phys attached. The DT and of_mdiobus_register already do this. This stops DSA phys being found and phys created for them, as this is handled by the DSA driver. Signed-off-by: Phil Reid Acked-by: Giuseppe Cavallaro Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index efb54f356a67..ea76129dafc2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -243,6 +243,9 @@ int stmmac_mdio_register(struct net_device *ndev) goto bus_register_fail; } + if (priv->plat->phy_node || mdio_node) + goto bus_register_done; + found = 0; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { struct phy_device *phydev = mdiobus_get_phy(new_bus, addr); @@ -298,6 +301,7 @@ int stmmac_mdio_register(struct net_device *ndev) return -ENODEV; } +bus_register_done: priv->mii = new_bus; return 0; -- cgit v1.2.3 From 2c9a266afefe137bff06bbe0fc48b4d3b3cb348c Mon Sep 17 00:00:00 2001 From: Manish Chopra Date: Tue, 15 Mar 2016 07:13:45 -0400 Subject: qlge: Fix receive packets drop. When running small packets [length < 256 bytes] traffic, packets were being dropped due to invalid data in those packets which were delivered by the driver upto the stack. Using pci_dma_sync_single_for_cpu ensures copying latest and updated data into skb from the receive buffer. Signed-off-by: Sony Chacko Signed-off-by: Manish Chopra Signed-off-by: David S. Miller --- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 997976426799..b28e73ea2c25 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1648,7 +1648,18 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev, return; } skb_reserve(new_skb, NET_IP_ALIGN); + + pci_dma_sync_single_for_cpu(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); + memcpy(skb_put(new_skb, length), skb->data, length); + + pci_dma_sync_single_for_device(qdev->pdev, + dma_unmap_addr(sbq_desc, mapaddr), + dma_unmap_len(sbq_desc, maplen), + PCI_DMA_FROMDEVICE); skb = new_skb; /* Frame error, so drop the packet. */ -- cgit v1.2.3 From 38c848c73180fb1d142a39e664b6d9663751f6b2 Mon Sep 17 00:00:00 2001 From: Yoshihiro Kaneko Date: Wed, 16 Mar 2016 00:52:16 +0900 Subject: ravb: fix result value overwrite The result value is overwritten by a return value of ravb_ptp_interrupt(). Signed-off-by: Yoshihiro Kaneko Acked-by: Sergei Shtylyov Signed-off-by: David S. Miller --- drivers/net/ethernet/renesas/ravb_main.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 8f2c4fb4c724..4e1a7dba7c4a 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -757,8 +757,8 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } - if (iss & ISS_CGIS) - result = ravb_ptp_interrupt(ndev); + if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + result = IRQ_HANDLED; mmiowb(); spin_unlock(&priv->lock); -- cgit v1.2.3 From 1e6bb1a3540fec3ef112b9a89dda88e684c3ff59 Mon Sep 17 00:00:00 2001 From: Yuval Mintz Date: Tue, 15 Mar 2016 19:52:04 +0200 Subject: bnx2x: Prevent false warning for lack of FC NPIV Not all adapters have FC-NPIV configured. 
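As an aside on the qlge fix above: a buffer mapped for streaming DMA has to be synced for the CPU before its contents are copied, and handed back to the device afterwards if the hardware keeps reusing it; skipping the first sync is what let stale data reach the stack for small frames. A minimal sketch of that pattern, using hypothetical names rather than the qlge driver's own descriptor state:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative only: pdev, rx_dma and rx_buf stand in for the driver's
 * real receive-descriptor bookkeeping; they are not qlge identifiers.
 */
static void rx_copy_small_frame(struct pci_dev *pdev, dma_addr_t rx_dma,
				const void *rx_buf, unsigned int len,
				struct sk_buff *skb)
{
	/* Make the device's writes visible to the CPU before copying. */
	pci_dma_sync_single_for_cpu(pdev, rx_dma, len, PCI_DMA_FROMDEVICE);

	memcpy(skb_put(skb, len), rx_buf, len);

	/* Return ownership to the device so the buffer can be refilled. */
	pci_dma_sync_single_for_device(pdev, rx_dma, len, PCI_DMA_FROMDEVICE);
}
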
If bnx2fc is used with such an adapter, driver would read irrelevant data from the the nvram and log "FC-NPIV table with bad length..." In system logs. Simply accept that reading '0' as the feature offset in nvram indicates the feature isn't there and return. Reported-by: Andrew Patterson Signed-off-by: Yuval Mintz Signed-off-by: David S. Miller --- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b597c32275aa..d465bd721146 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -14885,6 +14885,10 @@ static int bnx2x_get_fc_npiv(struct net_device *dev, } offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]); + if (!offset) { + DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n"); + goto out; + } DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset); /* Read the table contents from nvram */ -- cgit v1.2.3 From 31762eaa0d0804d34e297daad57cda45cbc6c961 Mon Sep 17 00:00:00 2001 From: Aaron Young Date: Tue, 15 Mar 2016 11:35:37 -0700 Subject: ldmvsw: Split sunvnet driver into common code Split sunvnet.c into sunvnet.c and sunvnet_common.c. Details: Since the sunvnet and ldmvsw drivers will both use common sunvnet code, move the functions (and support functions) anticipated to be common code from sunvnet.c to sunvnet_common.c. Similarly, sunvnet.h was renamed to sunvnet_common.h. The sunvnet_common.c code will be compiled into the kernel and act as a library of functions that are linked by either (or both) drivers when loaded. Function names for external functions in sunvnet_common.c (to be called by both the sunvnet and ldmvsw drivers) were tagged with a "_common" suffix to clearly designate them as common functions. No functional changes as of yet... just moved code verbatim to the new sunvnet_common.c/h files. Makefile/Kconfig support added to build sunvnet_common.c file. The code is included in the kernel if SUN_LDOMS is defined/selected. NOTE - per the SubmittingPatches documentation, since the code was just moved from one file another, the code was NOT checkpatch'd in this commit to aid in review. Signed-off-by: Aaron Young Signed-off-by: Rashmi Narasimhan Reviewed-by: Sowmini Varadhan Reviewed-by: Alexandre Chartre Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/Kconfig | 5 + drivers/net/ethernet/sun/Makefile | 1 + drivers/net/ethernet/sun/sunvnet.c | 1755 +---------------------------- drivers/net/ethernet/sun/sunvnet.h | 114 -- drivers/net/ethernet/sun/sunvnet_common.c | 1753 ++++++++++++++++++++++++++++ drivers/net/ethernet/sun/sunvnet_common.h | 133 +++ 6 files changed, 1917 insertions(+), 1844 deletions(-) delete mode 100644 drivers/net/ethernet/sun/sunvnet.h create mode 100644 drivers/net/ethernet/sun/sunvnet_common.c create mode 100644 drivers/net/ethernet/sun/sunvnet_common.h (limited to 'drivers') diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index dee94b67638c..aa58c11ea6db 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -69,6 +69,11 @@ config CASSINI Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also . 
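To make the intent of the sunvnet/ldmvsw split concrete: after this change sunvnet.c (and later the ldmvsw driver) become thin consumers of entry points exported by sunvnet_common.c. A rough sketch of that shape, with illustrative prototypes rather than the exact symbols added by the patch (the real helpers carry the "_common" suffix mentioned above):

#include <linux/netdevice.h>

/* sunvnet_common.h (sketch): shared entry points linked by both drivers. */
int sunvnet_open_common(struct net_device *dev);
int sunvnet_close_common(struct net_device *dev);

/* sunvnet.c (sketch): the per-driver ndo callbacks simply delegate. */
static int vnet_open(struct net_device *dev)
{
	return sunvnet_open_common(dev);
}

static int vnet_close(struct net_device *dev)
{
	return sunvnet_close_common(dev);
}

The Kconfig and Makefile wiring that builds the common object whenever SUN_LDOMS is enabled follows.
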
+config SUNVNET_COMMON + bool + depends on SUN_LDOMS + default y if SUN_LDOMS + config SUNVNET tristate "Sun Virtual Network support" depends on SUN_LDOMS diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile index 1e620ff88eba..7b622aa14c5b 100644 --- a/drivers/net/ethernet/sun/Makefile +++ b/drivers/net/ethernet/sun/Makefile @@ -7,5 +7,6 @@ obj-$(CONFIG_SUNQE) += sunqe.o obj-$(CONFIG_SUNBMAC) += sunbmac.o obj-$(CONFIG_SUNGEM) += sungem.o obj-$(CONFIG_CASSINI) += cassini.o +obj-$(CONFIG_SUNVNET_COMMON) += sunvnet_common.o obj-$(CONFIG_SUNVNET) += sunvnet.o obj-$(CONFIG_NIU) += niu.o diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 942a95db2061..5b9113524188 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -17,8 +17,6 @@ #include #include #include -#define CREATE_TRACE_POINTS -#include #if IS_ENABLED(CONFIG_IPV6) #include @@ -31,7 +29,12 @@ #include #include -#include "sunvnet.h" +#include "sunvnet_common.h" + +/* length of time before we decide the hardware is borked, + * and dev->tx_timeout() should be called to fix the problem + */ +#define VNET_TX_TIMEOUT (5 * HZ) #define DRV_MODULE_NAME "sunvnet" #define DRV_MODULE_VERSION "1.0" @@ -44,16 +47,6 @@ MODULE_DESCRIPTION("Sun LDOM virtual network driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); -#define VNET_MAX_TXQS 16 - -/* Heuristic for the number of times to exponentially backoff and - * retry sending an LDC trigger when EAGAIN is encountered - */ -#define VNET_MAX_RETRIES 10 - -static int __vnet_tx_trigger(struct vnet_port *port, u32 start); -static void vnet_port_reset(struct vnet_port *port); - /* Ordered from largest major to lowest */ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 8 }, @@ -62,1578 +55,6 @@ static struct vio_version vnet_versions[] = { { .major = 1, .minor = 0 }, }; -static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) -{ - return vio_dring_avail(dr, VNET_TX_RING_SIZE); -} - -static int vnet_handle_unknown(struct vnet_port *port, void *arg) -{ - struct vio_msg_tag *pkt = arg; - - pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", - pkt->type, pkt->stype, pkt->stype_env, pkt->sid); - pr_err("Resetting connection\n"); - - ldc_disconnect(port->vio.lp); - - return -ECONNRESET; -} - -static int vnet_port_alloc_tx_ring(struct vnet_port *port); - -static int vnet_send_attr(struct vio_driver_state *vio) -{ - struct vnet_port *port = to_vnet_port(vio); - struct net_device *dev = port->vp->dev; - struct vio_net_attr_info pkt; - int framelen = ETH_FRAME_LEN; - int i, err; - - err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); - if (err) - return err; - - memset(&pkt, 0, sizeof(pkt)); - pkt.tag.type = VIO_TYPE_CTRL; - pkt.tag.stype = VIO_SUBTYPE_INFO; - pkt.tag.stype_env = VIO_ATTR_INFO; - pkt.tag.sid = vio_send_sid(vio); - if (vio_version_before(vio, 1, 2)) - pkt.xfer_mode = VIO_DRING_MODE; - else - pkt.xfer_mode = VIO_NEW_DRING_MODE; - pkt.addr_type = VNET_ADDR_ETHERMAC; - pkt.ack_freq = 0; - for (i = 0; i < 6; i++) - pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); - if (vio_version_after(vio, 1, 3)) { - if (port->rmtu) { - port->rmtu = min(VNET_MAXPACKET, port->rmtu); - pkt.mtu = port->rmtu; - } else { - port->rmtu = VNET_MAXPACKET; - pkt.mtu = port->rmtu; - } - if (vio_version_after_eq(vio, 1, 6)) - pkt.options = VIO_TX_DRING; - } else if (vio_version_before(vio, 1, 3)) { - pkt.mtu = framelen; - } else { /* v1.3 */ - pkt.mtu = framelen + VLAN_HLEN; - 
} - - pkt.cflags = 0; - if (vio_version_after_eq(vio, 1, 7) && port->tso) { - pkt.cflags |= VNET_LSO_IPV4_CAPAB; - if (!port->tsolen) - port->tsolen = VNET_MAXTSO; - pkt.ipv4_lso_maxlen = port->tsolen; - } - - pkt.plnk_updt = PHYSLINK_UPDATE_NONE; - - viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " - "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " - "cflags[0x%04x] lso_max[%u]\n", - pkt.xfer_mode, pkt.addr_type, - (unsigned long long)pkt.addr, - pkt.ack_freq, pkt.plnk_updt, pkt.options, - (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); - - - return vio_ldc_send(vio, &pkt, sizeof(pkt)); -} - -static int handle_attr_info(struct vio_driver_state *vio, - struct vio_net_attr_info *pkt) -{ - struct vnet_port *port = to_vnet_port(vio); - u64 localmtu; - u8 xfer_mode; - - viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " - "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " - " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", - pkt->xfer_mode, pkt->addr_type, - (unsigned long long)pkt->addr, - pkt->ack_freq, pkt->plnk_updt, pkt->options, - (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, - pkt->ipv4_lso_maxlen); - - pkt->tag.sid = vio_send_sid(vio); - - xfer_mode = pkt->xfer_mode; - /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */ - if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) - xfer_mode = VIO_NEW_DRING_MODE; - - /* MTU negotiation: - * < v1.3 - ETH_FRAME_LEN exactly - * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change - * pkt->mtu for ACK - * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly - */ - if (vio_version_before(vio, 1, 3)) { - localmtu = ETH_FRAME_LEN; - } else if (vio_version_after(vio, 1, 3)) { - localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; - localmtu = min(pkt->mtu, localmtu); - pkt->mtu = localmtu; - } else { /* v1.3 */ - localmtu = ETH_FRAME_LEN + VLAN_HLEN; - } - port->rmtu = localmtu; - - /* LSO negotiation */ - if (vio_version_after_eq(vio, 1, 7)) - port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); - else - port->tso = false; - if (port->tso) { - if (!port->tsolen) - port->tsolen = VNET_MAXTSO; - port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); - if (port->tsolen < VNET_MINTSO) { - port->tso = false; - port->tsolen = 0; - pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; - } - pkt->ipv4_lso_maxlen = port->tsolen; - } else { - pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; - pkt->ipv4_lso_maxlen = 0; - } - - /* for version >= 1.6, ACK packet mode we support */ - if (vio_version_after_eq(vio, 1, 6)) { - pkt->xfer_mode = VIO_NEW_DRING_MODE; - pkt->options = VIO_TX_DRING; - } - - if (!(xfer_mode | VIO_NEW_DRING_MODE) || - pkt->addr_type != VNET_ADDR_ETHERMAC || - pkt->mtu != localmtu) { - viodbg(HS, "SEND NET ATTR NACK\n"); - - pkt->tag.stype = VIO_SUBTYPE_NACK; - - (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); - - return -ECONNRESET; - } else { - viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " - "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " - "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", - pkt->xfer_mode, pkt->addr_type, - (unsigned long long)pkt->addr, - pkt->ack_freq, pkt->plnk_updt, pkt->options, - (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, - pkt->ipv4_lso_maxlen); - - pkt->tag.stype = VIO_SUBTYPE_ACK; - - return vio_ldc_send(vio, pkt, sizeof(*pkt)); - } - -} - -static int handle_attr_ack(struct vio_driver_state *vio, - struct vio_net_attr_info *pkt) -{ - viodbg(HS, "GOT NET ATTR ACK\n"); - - return 0; -} - -static int handle_attr_nack(struct 
vio_driver_state *vio, - struct vio_net_attr_info *pkt) -{ - viodbg(HS, "GOT NET ATTR NACK\n"); - - return -ECONNRESET; -} - -static int vnet_handle_attr(struct vio_driver_state *vio, void *arg) -{ - struct vio_net_attr_info *pkt = arg; - - switch (pkt->tag.stype) { - case VIO_SUBTYPE_INFO: - return handle_attr_info(vio, pkt); - - case VIO_SUBTYPE_ACK: - return handle_attr_ack(vio, pkt); - - case VIO_SUBTYPE_NACK: - return handle_attr_nack(vio, pkt); - - default: - return -ECONNRESET; - } -} - -static void vnet_handshake_complete(struct vio_driver_state *vio) -{ - struct vio_dring_state *dr; - - dr = &vio->drings[VIO_DRIVER_RX_RING]; - dr->snd_nxt = dr->rcv_nxt = 1; - - dr = &vio->drings[VIO_DRIVER_TX_RING]; - dr->snd_nxt = dr->rcv_nxt = 1; -} - -/* The hypervisor interface that implements copying to/from imported - * memory from another domain requires that copies are done to 8-byte - * aligned buffers, and that the lengths of such copies are also 8-byte - * multiples. - * - * So we align skb->data to an 8-byte multiple and pad-out the data - * area so we can round the copy length up to the next multiple of - * 8 for the copy. - * - * The transmitter puts the actual start of the packet 6 bytes into - * the buffer it sends over, so that the IP headers after the ethernet - * header are aligned properly. These 6 bytes are not in the descriptor - * length, they are simply implied. This offset is represented using - * the VNET_PACKET_SKIP macro. - */ -static struct sk_buff *alloc_and_align_skb(struct net_device *dev, - unsigned int len) -{ - struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); - unsigned long addr, off; - - if (unlikely(!skb)) - return NULL; - - addr = (unsigned long) skb->data; - off = ((addr + 7UL) & ~7UL) - addr; - if (off) - skb_reserve(skb, off); - - return skb; -} - -static inline void vnet_fullcsum(struct sk_buff *skb) -{ - struct iphdr *iph = ip_hdr(skb); - int offset = skb_transport_offset(skb); - - if (skb->protocol != htons(ETH_P_IP)) - return; - if (iph->protocol != IPPROTO_TCP && - iph->protocol != IPPROTO_UDP) - return; - skb->ip_summed = CHECKSUM_NONE; - skb->csum_level = 1; - skb->csum = 0; - if (iph->protocol == IPPROTO_TCP) { - struct tcphdr *ptcp = tcp_hdr(skb); - - ptcp->check = 0; - skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); - ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - offset, IPPROTO_TCP, - skb->csum); - } else if (iph->protocol == IPPROTO_UDP) { - struct udphdr *pudp = udp_hdr(skb); - - pudp->check = 0; - skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); - pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - offset, IPPROTO_UDP, - skb->csum); - } -} - -static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) -{ - struct net_device *dev = port->vp->dev; - unsigned int len = desc->size; - unsigned int copy_len; - struct sk_buff *skb; - int maxlen; - int err; - - err = -EMSGSIZE; - if (port->tso && port->tsolen > port->rmtu) - maxlen = port->tsolen; - else - maxlen = port->rmtu; - if (unlikely(len < ETH_ZLEN || len > maxlen)) { - dev->stats.rx_length_errors++; - goto out_dropped; - } - - skb = alloc_and_align_skb(dev, len); - err = -ENOMEM; - if (unlikely(!skb)) { - dev->stats.rx_missed_errors++; - goto out_dropped; - } - - copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; - skb_put(skb, copy_len); - err = ldc_copy(port->vio.lp, LDC_COPY_IN, - skb->data, copy_len, 0, - desc->cookies, desc->ncookies); - if (unlikely(err < 0)) { - 
dev->stats.rx_frame_errors++; - goto out_free_skb; - } - - skb_pull(skb, VNET_PACKET_SKIP); - skb_trim(skb, len); - skb->protocol = eth_type_trans(skb, dev); - - if (vio_version_after_eq(&port->vio, 1, 8)) { - struct vio_net_dext *dext = vio_net_ext(desc); - - skb_reset_network_header(skb); - - if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { - if (skb->protocol == ETH_P_IP) { - struct iphdr *iph = ip_hdr(skb); - - iph->check = 0; - ip_send_check(iph); - } - } - if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && - skb->ip_summed == CHECKSUM_NONE) { - if (skb->protocol == htons(ETH_P_IP)) { - struct iphdr *iph = ip_hdr(skb); - int ihl = iph->ihl * 4; - - skb_reset_transport_header(skb); - skb_set_transport_header(skb, ihl); - vnet_fullcsum(skb); - } - } - if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { - skb->ip_summed = CHECKSUM_PARTIAL; - skb->csum_level = 0; - if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) - skb->csum_level = 1; - } - } - - skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL; - - dev->stats.rx_packets++; - dev->stats.rx_bytes += len; - napi_gro_receive(&port->napi, skb); - return 0; - -out_free_skb: - kfree_skb(skb); - -out_dropped: - dev->stats.rx_dropped++; - return err; -} - -static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, - u32 start, u32 end, u8 vio_dring_state) -{ - struct vio_dring_data hdr = { - .tag = { - .type = VIO_TYPE_DATA, - .stype = VIO_SUBTYPE_ACK, - .stype_env = VIO_DRING_DATA, - .sid = vio_send_sid(&port->vio), - }, - .dring_ident = dr->ident, - .start_idx = start, - .end_idx = end, - .state = vio_dring_state, - }; - int err, delay; - int retries = 0; - - hdr.seq = dr->snd_nxt; - delay = 1; - do { - err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); - if (err > 0) { - dr->snd_nxt++; - break; - } - udelay(delay); - if ((delay <<= 1) > 128) - delay = 128; - if (retries++ > VNET_MAX_RETRIES) { - pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", - port->raddr[0], port->raddr[1], - port->raddr[2], port->raddr[3], - port->raddr[4], port->raddr[5]); - break; - } - } while (err == -EAGAIN); - - if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { - port->stop_rx_idx = end; - port->stop_rx = true; - } else { - port->stop_rx_idx = 0; - port->stop_rx = false; - } - - return err; -} - -static struct vio_net_desc *get_rx_desc(struct vnet_port *port, - struct vio_dring_state *dr, - u32 index) -{ - struct vio_net_desc *desc = port->vio.desc_buf; - int err; - - err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, - (index * dr->entry_size), - dr->cookies, dr->ncookies); - if (err < 0) - return ERR_PTR(err); - - return desc; -} - -static int put_rx_desc(struct vnet_port *port, - struct vio_dring_state *dr, - struct vio_net_desc *desc, - u32 index) -{ - int err; - - err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, - (index * dr->entry_size), - dr->cookies, dr->ncookies); - if (err < 0) - return err; - - return 0; -} - -static int vnet_walk_rx_one(struct vnet_port *port, - struct vio_dring_state *dr, - u32 index, int *needs_ack) -{ - struct vio_net_desc *desc = get_rx_desc(port, dr, index); - struct vio_driver_state *vio = &port->vio; - int err; - - BUG_ON(desc == NULL); - if (IS_ERR(desc)) - return PTR_ERR(desc); - - if (desc->hdr.state != VIO_DESC_READY) - return 1; - - dma_rmb(); - - viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", - desc->hdr.state, desc->hdr.ack, - desc->size, desc->ncookies, - desc->cookies[0].cookie_addr, - desc->cookies[0].cookie_size); - - err = vnet_rx_one(port, 
desc); - if (err == -ECONNRESET) - return err; - trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, - index, desc->hdr.ack); - desc->hdr.state = VIO_DESC_DONE; - err = put_rx_desc(port, dr, desc, index); - if (err < 0) - return err; - *needs_ack = desc->hdr.ack; - return 0; -} - -static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, - u32 start, u32 end, int *npkts, int budget) -{ - struct vio_driver_state *vio = &port->vio; - int ack_start = -1, ack_end = -1; - bool send_ack = true; - - end = (end == (u32) -1) ? vio_dring_prev(dr, start) - : vio_dring_next(dr, end); - - viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); - - while (start != end) { - int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); - if (err == -ECONNRESET) - return err; - if (err != 0) - break; - (*npkts)++; - if (ack_start == -1) - ack_start = start; - ack_end = start; - start = vio_dring_next(dr, start); - if (ack && start != end) { - err = vnet_send_ack(port, dr, ack_start, ack_end, - VIO_DRING_ACTIVE); - if (err == -ECONNRESET) - return err; - ack_start = -1; - } - if ((*npkts) >= budget) { - send_ack = false; - break; - } - } - if (unlikely(ack_start == -1)) - ack_start = ack_end = vio_dring_prev(dr, start); - if (send_ack) { - port->napi_resume = false; - trace_vnet_tx_send_stopped_ack(port->vio._local_sid, - port->vio._peer_sid, - ack_end, *npkts); - return vnet_send_ack(port, dr, ack_start, ack_end, - VIO_DRING_STOPPED); - } else { - trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, - port->vio._peer_sid, - ack_end, *npkts); - port->napi_resume = true; - port->napi_stop_idx = ack_end; - return 1; - } -} - -static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, - int budget) -{ - struct vio_dring_data *pkt = msgbuf; - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; - struct vio_driver_state *vio = &port->vio; - - viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", - pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); - - if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) - return 0; - if (unlikely(pkt->seq != dr->rcv_nxt)) { - pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", - pkt->seq, dr->rcv_nxt); - return 0; - } - - if (!port->napi_resume) - dr->rcv_nxt++; - - /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ - - return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, - npkts, budget); -} - -static int idx_is_pending(struct vio_dring_state *dr, u32 end) -{ - u32 idx = dr->cons; - int found = 0; - - while (idx != dr->prod) { - if (idx == end) { - found = 1; - break; - } - idx = vio_dring_next(dr, idx); - } - return found; -} - -static int vnet_ack(struct vnet_port *port, void *msgbuf) -{ - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct vio_dring_data *pkt = msgbuf; - struct net_device *dev; - struct vnet *vp; - u32 end; - struct vio_net_desc *desc; - struct netdev_queue *txq; - - if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) - return 0; - - end = pkt->end_idx; - vp = port->vp; - dev = vp->dev; - netif_tx_lock(dev); - if (unlikely(!idx_is_pending(dr, end))) { - netif_tx_unlock(dev); - return 0; - } - - /* sync for race conditions with vnet_start_xmit() and tell xmit it - * is time to send a trigger. 
- */ - trace_vnet_rx_stopped_ack(port->vio._local_sid, - port->vio._peer_sid, end); - dr->cons = vio_dring_next(dr, end); - desc = vio_dring_entry(dr, dr->cons); - if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { - /* vnet_start_xmit() just populated this dring but missed - * sending the "start" LDC message to the consumer. - * Send a "start" trigger on its behalf. - */ - if (__vnet_tx_trigger(port, dr->cons) > 0) - port->start_cons = false; - else - port->start_cons = true; - } else { - port->start_cons = true; - } - netif_tx_unlock(dev); - - txq = netdev_get_tx_queue(dev, port->q_index); - if (unlikely(netif_tx_queue_stopped(txq) && - vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) - return 1; - - return 0; -} - -static int vnet_nack(struct vnet_port *port, void *msgbuf) -{ - /* XXX just reset or similar XXX */ - return 0; -} - -static int handle_mcast(struct vnet_port *port, void *msgbuf) -{ - struct vio_net_mcast_info *pkt = msgbuf; - - if (pkt->tag.stype != VIO_SUBTYPE_ACK) - pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", - port->vp->dev->name, - pkt->tag.type, - pkt->tag.stype, - pkt->tag.stype_env, - pkt->tag.sid); - - return 0; -} - -/* Got back a STOPPED LDC message on port. If the queue is stopped, - * wake it up so that we'll send out another START message at the - * next TX. - */ -static void maybe_tx_wakeup(struct vnet_port *port) -{ - struct netdev_queue *txq; - - txq = netdev_get_tx_queue(port->vp->dev, port->q_index); - __netif_tx_lock(txq, smp_processor_id()); - if (likely(netif_tx_queue_stopped(txq))) { - struct vio_dring_state *dr; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - netif_tx_wake_queue(txq); - } - __netif_tx_unlock(txq); -} - -static inline bool port_is_up(struct vnet_port *vnet) -{ - struct vio_driver_state *vio = &vnet->vio; - - return !!(vio->hs_state & VIO_HS_COMPLETE); -} - -static int vnet_event_napi(struct vnet_port *port, int budget) -{ - struct vio_driver_state *vio = &port->vio; - int tx_wakeup, err; - int npkts = 0; - int event = (port->rx_event & LDC_EVENT_RESET); - -ldc_ctrl: - if (unlikely(event == LDC_EVENT_RESET || - event == LDC_EVENT_UP)) { - vio_link_state_change(vio, event); - - if (event == LDC_EVENT_RESET) { - vnet_port_reset(port); - vio_port_up(vio); - } - port->rx_event = 0; - return 0; - } - /* We may have multiple LDC events in rx_event. 
Unroll send_events() */ - event = (port->rx_event & LDC_EVENT_UP); - port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); - if (event == LDC_EVENT_UP) - goto ldc_ctrl; - event = port->rx_event; - if (!(event & LDC_EVENT_DATA_READY)) - return 0; - - /* we dont expect any other bits than RESET, UP, DATA_READY */ - BUG_ON(event != LDC_EVENT_DATA_READY); - - tx_wakeup = err = 0; - while (1) { - union { - struct vio_msg_tag tag; - u64 raw[8]; - } msgbuf; - - if (port->napi_resume) { - struct vio_dring_data *pkt = - (struct vio_dring_data *)&msgbuf; - struct vio_dring_state *dr = - &port->vio.drings[VIO_DRIVER_RX_RING]; - - pkt->tag.type = VIO_TYPE_DATA; - pkt->tag.stype = VIO_SUBTYPE_INFO; - pkt->tag.stype_env = VIO_DRING_DATA; - pkt->seq = dr->rcv_nxt; - pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); - pkt->end_idx = -1; - goto napi_resume; - } - err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); - if (unlikely(err < 0)) { - if (err == -ECONNRESET) - vio_conn_reset(vio); - break; - } - if (err == 0) - break; - viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", - msgbuf.tag.type, - msgbuf.tag.stype, - msgbuf.tag.stype_env, - msgbuf.tag.sid); - err = vio_validate_sid(vio, &msgbuf.tag); - if (err < 0) - break; -napi_resume: - if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { - if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { - if (!port_is_up(port)) { - /* failures like handshake_failure() - * may have cleaned up dring, but - * NAPI polling may bring us here. - */ - err = -ECONNRESET; - break; - } - err = vnet_rx(port, &msgbuf, &npkts, budget); - if (npkts >= budget) - break; - if (npkts == 0) - break; - } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { - err = vnet_ack(port, &msgbuf); - if (err > 0) - tx_wakeup |= err; - } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { - err = vnet_nack(port, &msgbuf); - } - } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { - if (msgbuf.tag.stype_env == VNET_MCAST_INFO) - err = handle_mcast(port, &msgbuf); - else - err = vio_control_pkt_engine(vio, &msgbuf); - if (err) - break; - } else { - err = vnet_handle_unknown(port, &msgbuf); - } - if (err == -ECONNRESET) - break; - } - if (unlikely(tx_wakeup && err != -ECONNRESET)) - maybe_tx_wakeup(port); - return npkts; -} - -static int vnet_poll(struct napi_struct *napi, int budget) -{ - struct vnet_port *port = container_of(napi, struct vnet_port, napi); - struct vio_driver_state *vio = &port->vio; - int processed = vnet_event_napi(port, budget); - - if (processed < budget) { - napi_complete(napi); - port->rx_event &= ~LDC_EVENT_DATA_READY; - vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); - } - return processed; -} - -static void vnet_event(void *arg, int event) -{ - struct vnet_port *port = arg; - struct vio_driver_state *vio = &port->vio; - - port->rx_event |= event; - vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); - napi_schedule(&port->napi); - -} - -static int __vnet_tx_trigger(struct vnet_port *port, u32 start) -{ - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct vio_dring_data hdr = { - .tag = { - .type = VIO_TYPE_DATA, - .stype = VIO_SUBTYPE_INFO, - .stype_env = VIO_DRING_DATA, - .sid = vio_send_sid(&port->vio), - }, - .dring_ident = dr->ident, - .start_idx = start, - .end_idx = (u32) -1, - }; - int err, delay; - int retries = 0; - - if (port->stop_rx) { - trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, - port->vio._peer_sid, - port->stop_rx_idx, -1); - err = vnet_send_ack(port, - &port->vio.drings[VIO_DRIVER_RX_RING], - port->stop_rx_idx, -1, - VIO_DRING_STOPPED); - if 
(err <= 0) - return err; - } - - hdr.seq = dr->snd_nxt; - delay = 1; - do { - err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); - if (err > 0) { - dr->snd_nxt++; - break; - } - udelay(delay); - if ((delay <<= 1) > 128) - delay = 128; - if (retries++ > VNET_MAX_RETRIES) - break; - } while (err == -EAGAIN); - trace_vnet_tx_trigger(port->vio._local_sid, - port->vio._peer_sid, start, err); - - return err; -} - -struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) -{ - unsigned int hash = vnet_hashfn(skb->data); - struct hlist_head *hp = &vp->port_hash[hash]; - struct vnet_port *port; - - hlist_for_each_entry_rcu(port, hp, hash) { - if (!port_is_up(port)) - continue; - if (ether_addr_equal(port->raddr, skb->data)) - return port; - } - list_for_each_entry_rcu(port, &vp->port_list, list) { - if (!port->switch_port) - continue; - if (!port_is_up(port)) - continue; - return port; - } - return NULL; -} - -static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, - unsigned *pending) -{ - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct sk_buff *skb = NULL; - int i, txi; - - *pending = 0; - - txi = dr->prod; - for (i = 0; i < VNET_TX_RING_SIZE; ++i) { - struct vio_net_desc *d; - - --txi; - if (txi < 0) - txi = VNET_TX_RING_SIZE-1; - - d = vio_dring_entry(dr, txi); - - if (d->hdr.state == VIO_DESC_READY) { - (*pending)++; - continue; - } - if (port->tx_bufs[txi].skb) { - if (d->hdr.state != VIO_DESC_DONE) - pr_notice("invalid ring buffer state %d\n", - d->hdr.state); - BUG_ON(port->tx_bufs[txi].skb->next); - - port->tx_bufs[txi].skb->next = skb; - skb = port->tx_bufs[txi].skb; - port->tx_bufs[txi].skb = NULL; - - ldc_unmap(port->vio.lp, - port->tx_bufs[txi].cookies, - port->tx_bufs[txi].ncookies); - } else if (d->hdr.state == VIO_DESC_FREE) - break; - d->hdr.state = VIO_DESC_FREE; - } - return skb; -} - -static inline void vnet_free_skbs(struct sk_buff *skb) -{ - struct sk_buff *next; - - while (skb) { - next = skb->next; - skb->next = NULL; - dev_kfree_skb(skb); - skb = next; - } -} - -static void vnet_clean_timer_expire(unsigned long port0) -{ - struct vnet_port *port = (struct vnet_port *)port0; - struct sk_buff *freeskbs; - unsigned pending; - - netif_tx_lock(port->vp->dev); - freeskbs = vnet_clean_tx_ring(port, &pending); - netif_tx_unlock(port->vp->dev); - - vnet_free_skbs(freeskbs); - - if (pending) - (void)mod_timer(&port->clean_timer, - jiffies + VNET_CLEAN_TIMEOUT); - else - del_timer(&port->clean_timer); -} - -static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, - struct ldc_trans_cookie *cookies, int ncookies, - unsigned int map_perm) -{ - int i, nc, err, blen; - - /* header */ - blen = skb_headlen(skb); - if (blen < ETH_ZLEN) - blen = ETH_ZLEN; - blen += VNET_PACKET_SKIP; - blen += 8 - (blen & 7); - - err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, - ncookies, map_perm); - if (err < 0) - return err; - nc = err; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - u8 *vaddr; - - if (nc < ncookies) { - vaddr = kmap_atomic(skb_frag_page(f)); - blen = skb_frag_size(f); - blen += 8 - (blen & 7); - err = ldc_map_single(lp, vaddr + f->page_offset, - blen, cookies + nc, ncookies - nc, - map_perm); - kunmap_atomic(vaddr); - } else { - err = -EMSGSIZE; - } - - if (err < 0) { - ldc_unmap(lp, cookies, nc); - return err; - } - nc += err; - } - return nc; -} - -static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) -{ - struct 
sk_buff *nskb; - int i, len, pad, docopy; - - len = skb->len; - pad = 0; - if (len < ETH_ZLEN) { - pad += ETH_ZLEN - skb->len; - len += pad; - } - len += VNET_PACKET_SKIP; - pad += 8 - (len & 7); - - /* make sure we have enough cookies and alignment in every frag */ - docopy = skb_shinfo(skb)->nr_frags >= ncookies; - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; - - docopy |= f->page_offset & 7; - } - if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || - skb_tailroom(skb) < pad || - skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { - int start = 0, offset; - __wsum csum; - - len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN; - nskb = alloc_and_align_skb(skb->dev, len); - if (nskb == NULL) { - dev_kfree_skb(skb); - return NULL; - } - skb_reserve(nskb, VNET_PACKET_SKIP); - - nskb->protocol = skb->protocol; - offset = skb_mac_header(skb) - skb->data; - skb_set_mac_header(nskb, offset); - offset = skb_network_header(skb) - skb->data; - skb_set_network_header(nskb, offset); - offset = skb_transport_header(skb) - skb->data; - skb_set_transport_header(nskb, offset); - - offset = 0; - nskb->csum_offset = skb->csum_offset; - nskb->ip_summed = skb->ip_summed; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - start = skb_checksum_start_offset(skb); - if (start) { - struct iphdr *iph = ip_hdr(nskb); - int offset = start + nskb->csum_offset; - - if (skb_copy_bits(skb, 0, nskb->data, start)) { - dev_kfree_skb(nskb); - dev_kfree_skb(skb); - return NULL; - } - *(__sum16 *)(skb->data + offset) = 0; - csum = skb_copy_and_csum_bits(skb, start, - nskb->data + start, - skb->len - start, 0); - if (iph->protocol == IPPROTO_TCP || - iph->protocol == IPPROTO_UDP) { - csum = csum_tcpudp_magic(iph->saddr, iph->daddr, - skb->len - start, - iph->protocol, csum); - } - *(__sum16 *)(nskb->data + offset) = csum; - - nskb->ip_summed = CHECKSUM_NONE; - } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { - dev_kfree_skb(nskb); - dev_kfree_skb(skb); - return NULL; - } - (void)skb_put(nskb, skb->len); - if (skb_is_gso(skb)) { - skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; - skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; - } - nskb->queue_mapping = skb->queue_mapping; - dev_kfree_skb(skb); - skb = nskb; - } - return skb; -} - -static u16 -vnet_select_queue(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) -{ - struct vnet *vp = netdev_priv(dev); - struct vnet_port *port = __tx_port_find(vp, skb); - - if (port == NULL) - return 0; - return port->q_index; -} - -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev); - -static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) -{ - struct net_device *dev = port->vp->dev; - struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - struct sk_buff *segs; - int maclen, datalen; - int status; - int gso_size, gso_type, gso_segs; - int hlen = skb_transport_header(skb) - skb_mac_header(skb); - int proto = IPPROTO_IP; - - if (skb->protocol == htons(ETH_P_IP)) - proto = ip_hdr(skb)->protocol; - else if (skb->protocol == htons(ETH_P_IPV6)) - proto = ipv6_hdr(skb)->nexthdr; - - if (proto == IPPROTO_TCP) - hlen += tcp_hdr(skb)->doff * 4; - else if (proto == IPPROTO_UDP) - hlen += sizeof(struct udphdr); - else { - pr_err("vnet_handle_offloads GSO with unknown transport " - "protocol %d tproto %d\n", skb->protocol, proto); - hlen = 128; /* XXX */ - } - datalen = port->tsolen - hlen; - - gso_size = skb_shinfo(skb)->gso_size; 
- gso_type = skb_shinfo(skb)->gso_type; - gso_segs = skb_shinfo(skb)->gso_segs; - - if (port->tso && gso_size < datalen) - gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); - - if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { - struct netdev_queue *txq; - - txq = netdev_get_tx_queue(dev, port->q_index); - netif_tx_stop_queue(txq); - if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) - return NETDEV_TX_BUSY; - netif_tx_wake_queue(txq); - } - - maclen = skb_network_header(skb) - skb_mac_header(skb); - skb_pull(skb, maclen); - - if (port->tso && gso_size < datalen) { - if (skb_unclone(skb, GFP_ATOMIC)) - goto out_dropped; - - /* segment to TSO size */ - skb_shinfo(skb)->gso_size = datalen; - skb_shinfo(skb)->gso_segs = gso_segs; - } - segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); - if (IS_ERR(segs)) - goto out_dropped; - - skb_push(skb, maclen); - skb_reset_mac_header(skb); - - status = 0; - while (segs) { - struct sk_buff *curr = segs; - - segs = segs->next; - curr->next = NULL; - if (port->tso && curr->len > dev->mtu) { - skb_shinfo(curr)->gso_size = gso_size; - skb_shinfo(curr)->gso_type = gso_type; - skb_shinfo(curr)->gso_segs = - DIV_ROUND_UP(curr->len - hlen, gso_size); - } else - skb_shinfo(curr)->gso_size = 0; - - skb_push(curr, maclen); - skb_reset_mac_header(curr); - memcpy(skb_mac_header(curr), skb_mac_header(skb), - maclen); - curr->csum_start = skb_transport_header(curr) - curr->head; - if (ip_hdr(curr)->protocol == IPPROTO_TCP) - curr->csum_offset = offsetof(struct tcphdr, check); - else if (ip_hdr(curr)->protocol == IPPROTO_UDP) - curr->csum_offset = offsetof(struct udphdr, check); - - if (!(status & NETDEV_TX_MASK)) - status = vnet_start_xmit(curr, dev); - if (status & NETDEV_TX_MASK) - dev_kfree_skb_any(curr); - } - - if (!(status & NETDEV_TX_MASK)) - dev_kfree_skb_any(skb); - return status; -out_dropped: - dev->stats.tx_dropped++; - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; -} - -static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct vnet *vp = netdev_priv(dev); - struct vnet_port *port = NULL; - struct vio_dring_state *dr; - struct vio_net_desc *d; - unsigned int len; - struct sk_buff *freeskbs = NULL; - int i, err, txi; - unsigned pending = 0; - struct netdev_queue *txq; - - rcu_read_lock(); - port = __tx_port_find(vp, skb); - if (unlikely(!port)) { - rcu_read_unlock(); - goto out_dropped; - } - - if (skb_is_gso(skb) && skb->len > port->tsolen) { - err = vnet_handle_offloads(port, skb); - rcu_read_unlock(); - return err; - } - - if (!skb_is_gso(skb) && skb->len > port->rmtu) { - unsigned long localmtu = port->rmtu - ETH_HLEN; - - if (vio_version_after_eq(&port->vio, 1, 3)) - localmtu -= VLAN_HLEN; - - if (skb->protocol == htons(ETH_P_IP)) { - struct flowi4 fl4; - struct rtable *rt = NULL; - - memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_oif = dev->ifindex; - fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); - fl4.daddr = ip_hdr(skb)->daddr; - fl4.saddr = ip_hdr(skb)->saddr; - - rt = ip_route_output_key(dev_net(dev), &fl4); - rcu_read_unlock(); - if (!IS_ERR(rt)) { - skb_dst_set(skb, &rt->dst); - icmp_send(skb, ICMP_DEST_UNREACH, - ICMP_FRAG_NEEDED, - htonl(localmtu)); - } - } -#if IS_ENABLED(CONFIG_IPV6) - else if (skb->protocol == htons(ETH_P_IPV6)) - icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); -#endif - goto out_dropped; - } - - skb = vnet_skb_shape(skb, 2); - - if (unlikely(!skb)) - goto out_dropped; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - vnet_fullcsum(skb); - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - i 
= skb_get_queue_mapping(skb); - txq = netdev_get_tx_queue(dev, i); - if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - if (!netif_tx_queue_stopped(txq)) { - netif_tx_stop_queue(txq); - - /* This is a hard error, log it. */ - netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); - dev->stats.tx_errors++; - } - rcu_read_unlock(); - return NETDEV_TX_BUSY; - } - - d = vio_dring_cur(dr); - - txi = dr->prod; - - freeskbs = vnet_clean_tx_ring(port, &pending); - - BUG_ON(port->tx_bufs[txi].skb); - - len = skb->len; - if (len < ETH_ZLEN) - len = ETH_ZLEN; - - err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, - (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); - if (err < 0) { - netdev_info(dev, "tx buffer map error %d\n", err); - goto out_dropped; - } - - port->tx_bufs[txi].skb = skb; - skb = NULL; - port->tx_bufs[txi].ncookies = err; - - /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), - * thus it is safe to not set VIO_ACK_ENABLE for each transmission: - * the protocol itself does not require it as long as the peer - * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. - * - * An ACK for every packet in the ring is expensive as the - * sending of LDC messages is slow and affects performance. - */ - d->hdr.ack = VIO_ACK_DISABLE; - d->size = len; - d->ncookies = port->tx_bufs[txi].ncookies; - for (i = 0; i < d->ncookies; i++) - d->cookies[i] = port->tx_bufs[txi].cookies[i]; - if (vio_version_after_eq(&port->vio, 1, 7)) { - struct vio_net_dext *dext = vio_net_ext(d); - - memset(dext, 0, sizeof(*dext)); - if (skb_is_gso(port->tx_bufs[txi].skb)) { - dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) - ->gso_size; - dext->flags |= VNET_PKT_IPV4_LSO; - } - if (vio_version_after_eq(&port->vio, 1, 8) && - !port->switch_port) { - dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; - dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; - } - } - - /* This has to be a non-SMP write barrier because we are writing - * to memory which is shared with the peer LDOM. - */ - dma_wmb(); - - d->hdr.state = VIO_DESC_READY; - - /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent - * to notify the consumer that some descriptors are READY. - * After that "start" trigger, no additional triggers are needed until - * a DRING_STOPPED is received from the consumer. The dr->cons field - * (set up by vnet_ack()) has the value of the next dring index - * that has not yet been ack-ed. We send a "start" trigger here - * if, and only if, start_cons is true (reset it afterward). Conversely, - * vnet_ack() should check if the dring corresponding to cons - * is marked READY, but start_cons was false. - * If so, vnet_ack() should send out the missed "start" trigger. - * - * Note that the dma_wmb() above makes sure the cookies et al. are - * not globally visible before the VIO_DESC_READY, and that the - * stores are ordered correctly by the compiler. The consumer will - * not proceed until the VIO_DESC_READY is visible assuring that - * the consumer does not observe anything related to descriptors - * out of order. 
The HV trap from the LDC start trigger is the - * producer to consumer announcement that work is available to the - * consumer - */ - if (!port->start_cons) { /* previous trigger suffices */ - trace_vnet_skip_tx_trigger(port->vio._local_sid, - port->vio._peer_sid, dr->cons); - goto ldc_start_done; - } - - err = __vnet_tx_trigger(port, dr->cons); - if (unlikely(err < 0)) { - netdev_info(dev, "TX trigger error %d\n", err); - d->hdr.state = VIO_DESC_FREE; - skb = port->tx_bufs[txi].skb; - port->tx_bufs[txi].skb = NULL; - dev->stats.tx_carrier_errors++; - goto out_dropped; - } - -ldc_start_done: - port->start_cons = false; - - dev->stats.tx_packets++; - dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; - - dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); - if (unlikely(vnet_tx_dring_avail(dr) < 1)) { - netif_tx_stop_queue(txq); - if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) - netif_tx_wake_queue(txq); - } - - (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); - rcu_read_unlock(); - - vnet_free_skbs(freeskbs); - - return NETDEV_TX_OK; - -out_dropped: - if (pending) - (void)mod_timer(&port->clean_timer, - jiffies + VNET_CLEAN_TIMEOUT); - else if (port) - del_timer(&port->clean_timer); - if (port) - rcu_read_unlock(); - if (skb) - dev_kfree_skb(skb); - vnet_free_skbs(freeskbs); - dev->stats.tx_dropped++; - return NETDEV_TX_OK; -} - -static void vnet_tx_timeout(struct net_device *dev) -{ - /* XXX Implement me XXX */ -} - -static int vnet_open(struct net_device *dev) -{ - netif_carrier_on(dev); - netif_tx_start_all_queues(dev); - - return 0; -} - -static int vnet_close(struct net_device *dev) -{ - netif_tx_stop_all_queues(dev); - netif_carrier_off(dev); - - return 0; -} - -static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) -{ - struct vnet_mcast_entry *m; - - for (m = vp->mcast_list; m; m = m->next) { - if (ether_addr_equal(m->addr, addr)) - return m; - } - return NULL; -} - -static void __update_mc_list(struct vnet *vp, struct net_device *dev) -{ - struct netdev_hw_addr *ha; - - netdev_for_each_mc_addr(ha, dev) { - struct vnet_mcast_entry *m; - - m = __vnet_mc_find(vp, ha->addr); - if (m) { - m->hit = 1; - continue; - } - - if (!m) { - m = kzalloc(sizeof(*m), GFP_ATOMIC); - if (!m) - continue; - memcpy(m->addr, ha->addr, ETH_ALEN); - m->hit = 1; - - m->next = vp->mcast_list; - vp->mcast_list = m; - } - } -} - -static void __send_mc_list(struct vnet *vp, struct vnet_port *port) -{ - struct vio_net_mcast_info info; - struct vnet_mcast_entry *m, **pp; - int n_addrs; - - memset(&info, 0, sizeof(info)); - - info.tag.type = VIO_TYPE_CTRL; - info.tag.stype = VIO_SUBTYPE_INFO; - info.tag.stype_env = VNET_MCAST_INFO; - info.tag.sid = vio_send_sid(&port->vio); - info.set = 1; - - n_addrs = 0; - for (m = vp->mcast_list; m; m = m->next) { - if (m->sent) - continue; - m->sent = 1; - memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], - m->addr, ETH_ALEN); - if (++n_addrs == VNET_NUM_MCAST) { - info.count = n_addrs; - - (void) vio_ldc_send(&port->vio, &info, - sizeof(info)); - n_addrs = 0; - } - } - if (n_addrs) { - info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, sizeof(info)); - } - - info.set = 0; - - n_addrs = 0; - pp = &vp->mcast_list; - while ((m = *pp) != NULL) { - if (m->hit) { - m->hit = 0; - pp = &m->next; - continue; - } - - memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], - m->addr, ETH_ALEN); - if (++n_addrs == VNET_NUM_MCAST) { - info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, - sizeof(info)); - n_addrs = 0; - } - - *pp = 
m->next; - kfree(m); - } - if (n_addrs) { - info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, sizeof(info)); - } -} - -static void vnet_set_rx_mode(struct net_device *dev) -{ - struct vnet *vp = netdev_priv(dev); - struct vnet_port *port; - - rcu_read_lock(); - list_for_each_entry_rcu(port, &vp->port_list, list) { - - if (port->switch_port) { - __update_mc_list(vp, dev); - __send_mc_list(vp, port); - break; - } - } - rcu_read_unlock(); -} - -static int vnet_change_mtu(struct net_device *dev, int new_mtu) -{ - if (new_mtu < 68 || new_mtu > 65535) - return -EINVAL; - - dev->mtu = new_mtu; - return 0; -} - -static int vnet_set_mac_addr(struct net_device *dev, void *p) -{ - return -EINVAL; -} - static void vnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1660,129 +81,21 @@ static const struct ethtool_ops vnet_ethtool_ops = { .get_link = ethtool_op_get_link, }; -static void vnet_port_free_tx_bufs(struct vnet_port *port) -{ - struct vio_dring_state *dr; - int i; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - - if (dr->base == NULL) - return; - - for (i = 0; i < VNET_TX_RING_SIZE; i++) { - struct vio_net_desc *d; - void *skb = port->tx_bufs[i].skb; - - if (!skb) - continue; - - d = vio_dring_entry(dr, i); - - ldc_unmap(port->vio.lp, - port->tx_bufs[i].cookies, - port->tx_bufs[i].ncookies); - dev_kfree_skb(skb); - port->tx_bufs[i].skb = NULL; - d->hdr.state = VIO_DESC_FREE; - } - ldc_free_exp_dring(port->vio.lp, dr->base, - (dr->entry_size * dr->num_entries), - dr->cookies, dr->ncookies); - dr->base = NULL; - dr->entry_size = 0; - dr->num_entries = 0; - dr->pending = 0; - dr->ncookies = 0; -} - -static void vnet_port_reset(struct vnet_port *port) -{ - del_timer(&port->clean_timer); - vnet_port_free_tx_bufs(port); - port->rmtu = 0; - port->tso = true; - port->tsolen = 0; -} - -static int vnet_port_alloc_tx_ring(struct vnet_port *port) -{ - struct vio_dring_state *dr; - unsigned long len, elen; - int i, err, ncookies; - void *dring; - - dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - - elen = sizeof(struct vio_net_desc) + - sizeof(struct ldc_trans_cookie) * 2; - if (vio_version_after_eq(&port->vio, 1, 7)) - elen += sizeof(struct vio_net_dext); - len = VNET_TX_RING_SIZE * elen; - - ncookies = VIO_MAX_RING_COOKIES; - dring = ldc_alloc_exp_dring(port->vio.lp, len, - dr->cookies, &ncookies, - (LDC_MAP_SHADOW | - LDC_MAP_DIRECT | - LDC_MAP_RW)); - if (IS_ERR(dring)) { - err = PTR_ERR(dring); - goto err_out; - } - - dr->base = dring; - dr->entry_size = elen; - dr->num_entries = VNET_TX_RING_SIZE; - dr->prod = dr->cons = 0; - port->start_cons = true; /* need an initial trigger */ - dr->pending = VNET_TX_RING_SIZE; - dr->ncookies = ncookies; - - for (i = 0; i < VNET_TX_RING_SIZE; ++i) { - struct vio_net_desc *d; - - d = vio_dring_entry(dr, i); - d->hdr.state = VIO_DESC_FREE; - } - return 0; - -err_out: - vnet_port_free_tx_bufs(port); - - return err; -} - -#ifdef CONFIG_NET_POLL_CONTROLLER -static void vnet_poll_controller(struct net_device *dev) -{ - struct vnet *vp = netdev_priv(dev); - struct vnet_port *port; - unsigned long flags; - - spin_lock_irqsave(&vp->lock, flags); - if (!list_empty(&vp->port_list)) { - port = list_entry(vp->port_list.next, struct vnet_port, list); - napi_schedule(&port->napi); - } - spin_unlock_irqrestore(&vp->lock, flags); -} -#endif static LIST_HEAD(vnet_list); static DEFINE_MUTEX(vnet_list_mutex); static const struct net_device_ops vnet_ops = { - .ndo_open = vnet_open, - .ndo_stop = vnet_close, - .ndo_set_rx_mode = vnet_set_rx_mode, - 
.ndo_set_mac_address = vnet_set_mac_addr, + .ndo_open = sunvnet_open_common, + .ndo_stop = sunvnet_close_common, + .ndo_set_rx_mode = sunvnet_set_rx_mode_common, + .ndo_set_mac_address = sunvnet_set_mac_addr_common, .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = vnet_tx_timeout, - .ndo_change_mtu = vnet_change_mtu, - .ndo_start_xmit = vnet_start_xmit, - .ndo_select_queue = vnet_select_queue, + .ndo_tx_timeout = sunvnet_tx_timeout_common, + .ndo_change_mtu = sunvnet_change_mtu_common, + .ndo_start_xmit = sunvnet_start_xmit_common, + .ndo_select_queue = sunvnet_select_queue_common, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = vnet_poll_controller, + .ndo_poll_controller = sunvnet_poll_controller_common, #endif }; @@ -1908,15 +221,15 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp, } static struct ldc_channel_config vnet_ldc_cfg = { - .event = vnet_event, + .event = sunvnet_event_common, .mtu = 64, .mode = LDC_MODE_UNRELIABLE, }; static struct vio_driver_ops vnet_vio_ops = { - .send_attr = vnet_send_attr, - .handle_attr = vnet_handle_attr, - .handshake_complete = vnet_handshake_complete, + .send_attr = sunvnet_send_attr_common, + .handle_attr = sunvnet_handle_attr_common, + .handshake_complete = sunvnet_handshake_complete_common, }; static void print_version(void) @@ -1926,25 +239,6 @@ static void print_version(void) const char *remote_macaddr_prop = "remote-mac-address"; -static void -vnet_port_add_txq(struct vnet_port *port) -{ - struct vnet *vp = port->vp; - int n; - - n = vp->nports++; - n = n & (VNET_MAX_TXQS - 1); - port->q_index = n; - netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); -} - -static void -vnet_port_rm_txq(struct vnet_port *port) -{ - port->vp->nports--; - netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); -} - static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) { struct mdesc_handle *hp; @@ -1992,7 +286,8 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) if (err) goto err_out_free_port; - netif_napi_add(port->vp->dev, &port->napi, vnet_poll, NAPI_POLL_WEIGHT); + netif_napi_add(port->vp->dev, &port->napi, sunvnet_poll_common, + NAPI_POLL_WEIGHT); INIT_HLIST_NODE(&port->hash); INIT_LIST_HEAD(&port->list); @@ -2011,7 +306,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) list_add_tail_rcu(&port->list, &vp->port_list); hlist_add_head_rcu(&port->hash, &vp->port_hash[vnet_hashfn(port->raddr)]); - vnet_port_add_txq(port); + sunvnet_port_add_txq_common(port); spin_unlock_irqrestore(&vp->lock, flags); dev_set_drvdata(&vdev->dev, port); @@ -2019,7 +314,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) pr_info("%s: PORT ( remote-mac %pM%s )\n", vp->dev->name, port->raddr, switch_port ? 
" switch-port" : ""); - setup_timer(&port->clean_timer, vnet_clean_timer_expire, + setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common, (unsigned long)port); napi_enable(&port->napi); @@ -2052,9 +347,9 @@ static int vnet_port_remove(struct vio_dev *vdev) synchronize_rcu(); del_timer_sync(&port->clean_timer); - vnet_port_rm_txq(port); + sunvnet_port_rm_txq_common(port); netif_napi_del(&port->napi); - vnet_port_free_tx_bufs(port); + sunvnet_port_free_tx_bufs_common(port); vio_ldc_free(&port->vio); dev_set_drvdata(&vdev->dev, NULL); diff --git a/drivers/net/ethernet/sun/sunvnet.h b/drivers/net/ethernet/sun/sunvnet.h deleted file mode 100644 index 01ca78191683..000000000000 --- a/drivers/net/ethernet/sun/sunvnet.h +++ /dev/null @@ -1,114 +0,0 @@ -#ifndef _SUNVNET_H -#define _SUNVNET_H - -#include - -#define DESC_NCOOKIES(entry_size) \ - ((entry_size) - sizeof(struct vio_net_desc)) - -/* length of time before we decide the hardware is borked, - * and dev->tx_timeout() should be called to fix the problem - */ -#define VNET_TX_TIMEOUT (5 * HZ) - -/* length of time (or less) we expect pending descriptors to be marked - * as VIO_DESC_DONE and skbs ready to be freed - */ -#define VNET_CLEAN_TIMEOUT ((HZ/100)+1) - -#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN) -#define VNET_TX_RING_SIZE 512 -#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) - -#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ -#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ - -/* VNET packets are sent in buffers with the first 6 bytes skipped - * so that after the ethernet header the IPv4/IPv6 headers are aligned - * properly. - */ -#define VNET_PACKET_SKIP 6 - -#define VNET_MAXCOOKIES (VNET_MAXPACKET/PAGE_SIZE + 1) - -struct vnet_tx_entry { - struct sk_buff *skb; - unsigned int ncookies; - struct ldc_trans_cookie cookies[VNET_MAXCOOKIES]; -}; - -struct vnet; -struct vnet_port { - struct vio_driver_state vio; - - struct hlist_node hash; - u8 raddr[ETH_ALEN]; - unsigned switch_port:1; - unsigned tso:1; - unsigned __pad:14; - - struct vnet *vp; - - struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; - - struct list_head list; - - u32 stop_rx_idx; - bool stop_rx; - bool start_cons; - - struct timer_list clean_timer; - - u64 rmtu; - u16 tsolen; - - struct napi_struct napi; - u32 napi_stop_idx; - bool napi_resume; - int rx_event; - u16 q_index; -}; - -static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) -{ - return container_of(vio, struct vnet_port, vio); -} - -#define VNET_PORT_HASH_SIZE 16 -#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) - -static inline unsigned int vnet_hashfn(u8 *mac) -{ - unsigned int val = mac[4] ^ mac[5]; - - return val & (VNET_PORT_HASH_MASK); -} - -struct vnet_mcast_entry { - u8 addr[ETH_ALEN]; - u8 sent; - u8 hit; - struct vnet_mcast_entry *next; -}; - -struct vnet { - /* Protects port_list and port_hash. */ - spinlock_t lock; - - struct net_device *dev; - - u32 msg_enable; - - struct list_head port_list; - - struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; - - struct vnet_mcast_entry *mcast_list; - - struct list_head list; - u64 local_mac; - - int nports; -}; - -#endif /* _SUNVNET_H */ diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c new file mode 100644 index 000000000000..49e85d0d960b --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -0,0 +1,1753 @@ +/* sunvnet.c: Sun LDOM Virtual Network Driver. + * + * Copyright (C) 2007, 2008 David S. 
Miller + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define CREATE_TRACE_POINTS +#include + +#if IS_ENABLED(CONFIG_IPV6) +#include +#endif + +#include +#include +#include + +#include +#include + +#include "sunvnet_common.h" + +/* Heuristic for the number of times to exponentially backoff and + * retry sending an LDC trigger when EAGAIN is encountered + */ +#define VNET_MAX_RETRIES 10 + +static int __vnet_tx_trigger(struct vnet_port *port, u32 start); +static void vnet_port_reset(struct vnet_port *port); + +static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr) +{ + return vio_dring_avail(dr, VNET_TX_RING_SIZE); +} + +static int vnet_handle_unknown(struct vnet_port *port, void *arg) +{ + struct vio_msg_tag *pkt = arg; + + pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n", + pkt->type, pkt->stype, pkt->stype_env, pkt->sid); + pr_err("Resetting connection\n"); + + ldc_disconnect(port->vio.lp); + + return -ECONNRESET; +} + +static int vnet_port_alloc_tx_ring(struct vnet_port *port); + +int sunvnet_send_attr_common(struct vio_driver_state *vio) +{ + struct vnet_port *port = to_vnet_port(vio); + struct net_device *dev = port->vp->dev; + struct vio_net_attr_info pkt; + int framelen = ETH_FRAME_LEN; + int i, err; + + err = vnet_port_alloc_tx_ring(to_vnet_port(vio)); + if (err) + return err; + + memset(&pkt, 0, sizeof(pkt)); + pkt.tag.type = VIO_TYPE_CTRL; + pkt.tag.stype = VIO_SUBTYPE_INFO; + pkt.tag.stype_env = VIO_ATTR_INFO; + pkt.tag.sid = vio_send_sid(vio); + if (vio_version_before(vio, 1, 2)) + pkt.xfer_mode = VIO_DRING_MODE; + else + pkt.xfer_mode = VIO_NEW_DRING_MODE; + pkt.addr_type = VNET_ADDR_ETHERMAC; + pkt.ack_freq = 0; + for (i = 0; i < 6; i++) + pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8); + if (vio_version_after(vio, 1, 3)) { + if (port->rmtu) { + port->rmtu = min(VNET_MAXPACKET, port->rmtu); + pkt.mtu = port->rmtu; + } else { + port->rmtu = VNET_MAXPACKET; + pkt.mtu = port->rmtu; + } + if (vio_version_after_eq(vio, 1, 6)) + pkt.options = VIO_TX_DRING; + } else if (vio_version_before(vio, 1, 3)) { + pkt.mtu = framelen; + } else { /* v1.3 */ + pkt.mtu = framelen + VLAN_HLEN; + } + + pkt.cflags = 0; + if (vio_version_after_eq(vio, 1, 7) && port->tso) { + pkt.cflags |= VNET_LSO_IPV4_CAPAB; + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + pkt.ipv4_lso_maxlen = port->tsolen; + } + + pkt.plnk_updt = PHYSLINK_UPDATE_NONE; + + viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + "cflags[0x%04x] lso_max[%u]\n", + pkt.xfer_mode, pkt.addr_type, + (unsigned long long)pkt.addr, + pkt.ack_freq, pkt.plnk_updt, pkt.options, + (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); + + + return vio_ldc_send(vio, &pkt, sizeof(pkt)); +} +EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); + +static int handle_attr_info(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + struct vnet_port *port = to_vnet_port(vio); + u64 localmtu; + u8 xfer_mode; + + viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] " + "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] " + " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.sid = vio_send_sid(vio); + + xfer_mode = pkt->xfer_mode; + /* for version < 1.2, 
VIO_DRING_MODE = 0x3 and no bitmask */ + if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE) + xfer_mode = VIO_NEW_DRING_MODE; + + /* MTU negotiation: + * < v1.3 - ETH_FRAME_LEN exactly + * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change + * pkt->mtu for ACK + * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly + */ + if (vio_version_before(vio, 1, 3)) { + localmtu = ETH_FRAME_LEN; + } else if (vio_version_after(vio, 1, 3)) { + localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET; + localmtu = min(pkt->mtu, localmtu); + pkt->mtu = localmtu; + } else { /* v1.3 */ + localmtu = ETH_FRAME_LEN + VLAN_HLEN; + } + port->rmtu = localmtu; + + /* LSO negotiation */ + if (vio_version_after_eq(vio, 1, 7)) + port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB); + else + port->tso = false; + if (port->tso) { + if (!port->tsolen) + port->tsolen = VNET_MAXTSO; + port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen); + if (port->tsolen < VNET_MINTSO) { + port->tso = false; + port->tsolen = 0; + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + } + pkt->ipv4_lso_maxlen = port->tsolen; + } else { + pkt->cflags &= ~VNET_LSO_IPV4_CAPAB; + pkt->ipv4_lso_maxlen = 0; + } + + /* for version >= 1.6, ACK packet mode we support */ + if (vio_version_after_eq(vio, 1, 6)) { + pkt->xfer_mode = VIO_NEW_DRING_MODE; + pkt->options = VIO_TX_DRING; + } + + if (!(xfer_mode | VIO_NEW_DRING_MODE) || + pkt->addr_type != VNET_ADDR_ETHERMAC || + pkt->mtu != localmtu) { + viodbg(HS, "SEND NET ATTR NACK\n"); + + pkt->tag.stype = VIO_SUBTYPE_NACK; + + (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); + + return -ECONNRESET; + } else { + viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " + "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " + "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.stype = VIO_SUBTYPE_ACK; + + return vio_ldc_send(vio, pkt, sizeof(*pkt)); + } + +} + +static int handle_attr_ack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR ACK\n"); + + return 0; +} + +static int handle_attr_nack(struct vio_driver_state *vio, + struct vio_net_attr_info *pkt) +{ + viodbg(HS, "GOT NET ATTR NACK\n"); + + return -ECONNRESET; +} + +int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg) +{ + struct vio_net_attr_info *pkt = arg; + + switch (pkt->tag.stype) { + case VIO_SUBTYPE_INFO: + return handle_attr_info(vio, pkt); + + case VIO_SUBTYPE_ACK: + return handle_attr_ack(vio, pkt); + + case VIO_SUBTYPE_NACK: + return handle_attr_nack(vio, pkt); + + default: + return -ECONNRESET; + } +} +EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common); + +void sunvnet_handshake_complete_common(struct vio_driver_state *vio) +{ + struct vio_dring_state *dr; + + dr = &vio->drings[VIO_DRIVER_RX_RING]; + dr->snd_nxt = dr->rcv_nxt = 1; + + dr = &vio->drings[VIO_DRIVER_TX_RING]; + dr->snd_nxt = dr->rcv_nxt = 1; +} +EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); + +/* The hypervisor interface that implements copying to/from imported + * memory from another domain requires that copies are done to 8-byte + * aligned buffers, and that the lengths of such copies are also 8-byte + * multiples. + * + * So we align skb->data to an 8-byte multiple and pad-out the data + * area so we can round the copy length up to the next multiple of + * 8 for the copy. 
+ * + * The transmitter puts the actual start of the packet 6 bytes into + * the buffer it sends over, so that the IP headers after the ethernet + * header are aligned properly. These 6 bytes are not in the descriptor + * length, they are simply implied. This offset is represented using + * the VNET_PACKET_SKIP macro. + */ +static struct sk_buff *alloc_and_align_skb(struct net_device *dev, + unsigned int len) +{ + struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); + unsigned long addr, off; + + if (unlikely(!skb)) + return NULL; + + addr = (unsigned long) skb->data; + off = ((addr + 7UL) & ~7UL) - addr; + if (off) + skb_reserve(skb, off); + + return skb; +} + +static inline void vnet_fullcsum(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + int offset = skb_transport_offset(skb); + + if (skb->protocol != htons(ETH_P_IP)) + return; + if (iph->protocol != IPPROTO_TCP && + iph->protocol != IPPROTO_UDP) + return; + skb->ip_summed = CHECKSUM_NONE; + skb->csum_level = 1; + skb->csum = 0; + if (iph->protocol == IPPROTO_TCP) { + struct tcphdr *ptcp = tcp_hdr(skb); + + ptcp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_TCP, + skb->csum); + } else if (iph->protocol == IPPROTO_UDP) { + struct udphdr *pudp = udp_hdr(skb); + + pudp->check = 0; + skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); + pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - offset, IPPROTO_UDP, + skb->csum); + } +} + +static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) +{ + struct net_device *dev = port->vp->dev; + unsigned int len = desc->size; + unsigned int copy_len; + struct sk_buff *skb; + int maxlen; + int err; + + err = -EMSGSIZE; + if (port->tso && port->tsolen > port->rmtu) + maxlen = port->tsolen; + else + maxlen = port->rmtu; + if (unlikely(len < ETH_ZLEN || len > maxlen)) { + dev->stats.rx_length_errors++; + goto out_dropped; + } + + skb = alloc_and_align_skb(dev, len); + err = -ENOMEM; + if (unlikely(!skb)) { + dev->stats.rx_missed_errors++; + goto out_dropped; + } + + copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U; + skb_put(skb, copy_len); + err = ldc_copy(port->vio.lp, LDC_COPY_IN, + skb->data, copy_len, 0, + desc->cookies, desc->ncookies); + if (unlikely(err < 0)) { + dev->stats.rx_frame_errors++; + goto out_free_skb; + } + + skb_pull(skb, VNET_PACKET_SKIP); + skb_trim(skb, len); + skb->protocol = eth_type_trans(skb, dev); + + if (vio_version_after_eq(&port->vio, 1, 8)) { + struct vio_net_dext *dext = vio_net_ext(desc); + + skb_reset_network_header(skb); + + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { + if (skb->protocol == ETH_P_IP) { + struct iphdr *iph = ip_hdr(skb); + + iph->check = 0; + ip_send_check(iph); + } + } + if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && + skb->ip_summed == CHECKSUM_NONE) { + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl * 4; + + skb_reset_transport_header(skb); + skb_set_transport_header(skb, ihl); + vnet_fullcsum(skb); + } + } + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { + skb->ip_summed = CHECKSUM_PARTIAL; + skb->csum_level = 0; + if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK) + skb->csum_level = 1; + } + } + + skb->ip_summed = port->switch_port ? 
CHECKSUM_NONE : CHECKSUM_PARTIAL; + + dev->stats.rx_packets++; + dev->stats.rx_bytes += len; + napi_gro_receive(&port->napi, skb); + return 0; + +out_free_skb: + kfree_skb(skb); + +out_dropped: + dev->stats.rx_dropped++; + return err; +} + +static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, u8 vio_dring_state) +{ + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_ACK, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = end, + .state = vio_dring_state, + }; + int err, delay; + int retries = 0; + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + if (retries++ > VNET_MAX_RETRIES) { + pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n", + port->raddr[0], port->raddr[1], + port->raddr[2], port->raddr[3], + port->raddr[4], port->raddr[5]); + break; + } + } while (err == -EAGAIN); + + if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) { + port->stop_rx_idx = end; + port->stop_rx = true; + } else { + port->stop_rx_idx = 0; + port->stop_rx = false; + } + + return err; +} + +static struct vio_net_desc *get_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index) +{ + struct vio_net_desc *desc = port->vio.desc_buf; + int err; + + err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return ERR_PTR(err); + + return desc; +} + +static int put_rx_desc(struct vnet_port *port, + struct vio_dring_state *dr, + struct vio_net_desc *desc, + u32 index) +{ + int err; + + err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size, + (index * dr->entry_size), + dr->cookies, dr->ncookies); + if (err < 0) + return err; + + return 0; +} + +static int vnet_walk_rx_one(struct vnet_port *port, + struct vio_dring_state *dr, + u32 index, int *needs_ack) +{ + struct vio_net_desc *desc = get_rx_desc(port, dr, index); + struct vio_driver_state *vio = &port->vio; + int err; + + BUG_ON(desc == NULL); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + if (desc->hdr.state != VIO_DESC_READY) + return 1; + + dma_rmb(); + + viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n", + desc->hdr.state, desc->hdr.ack, + desc->size, desc->ncookies, + desc->cookies[0].cookie_addr, + desc->cookies[0].cookie_size); + + err = vnet_rx_one(port, desc); + if (err == -ECONNRESET) + return err; + trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid, + index, desc->hdr.ack); + desc->hdr.state = VIO_DESC_DONE; + err = put_rx_desc(port, dr, desc, index); + if (err < 0) + return err; + *needs_ack = desc->hdr.ack; + return 0; +} + +static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, + u32 start, u32 end, int *npkts, int budget) +{ + struct vio_driver_state *vio = &port->vio; + int ack_start = -1, ack_end = -1; + bool send_ack = true; + + end = (end == (u32) -1) ? 
vio_dring_prev(dr, start) + : vio_dring_next(dr, end); + + viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); + + while (start != end) { + int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); + if (err == -ECONNRESET) + return err; + if (err != 0) + break; + (*npkts)++; + if (ack_start == -1) + ack_start = start; + ack_end = start; + start = vio_dring_next(dr, start); + if (ack && start != end) { + err = vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_ACTIVE); + if (err == -ECONNRESET) + return err; + ack_start = -1; + } + if ((*npkts) >= budget) { + send_ack = false; + break; + } + } + if (unlikely(ack_start == -1)) + ack_start = ack_end = vio_dring_prev(dr, start); + if (send_ack) { + port->napi_resume = false; + trace_vnet_tx_send_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); + return vnet_send_ack(port, dr, ack_start, ack_end, + VIO_DRING_STOPPED); + } else { + trace_vnet_tx_defer_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + ack_end, *npkts); + port->napi_resume = true; + port->napi_stop_idx = ack_end; + return 1; + } +} + +static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts, + int budget) +{ + struct vio_dring_data *pkt = msgbuf; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING]; + struct vio_driver_state *vio = &port->vio; + + viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n", + pkt->tag.stype_env, pkt->seq, dr->rcv_nxt); + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + if (unlikely(pkt->seq != dr->rcv_nxt)) { + pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n", + pkt->seq, dr->rcv_nxt); + return 0; + } + + if (!port->napi_resume) + dr->rcv_nxt++; + + /* XXX Validate pkt->start_idx and pkt->end_idx XXX */ + + return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx, + npkts, budget); +} + +static int idx_is_pending(struct vio_dring_state *dr, u32 end) +{ + u32 idx = dr->cons; + int found = 0; + + while (idx != dr->prod) { + if (idx == end) { + found = 1; + break; + } + idx = vio_dring_next(dr, idx); + } + return found; +} + +static int vnet_ack(struct vnet_port *port, void *msgbuf) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data *pkt = msgbuf; + struct net_device *dev; + struct vnet *vp; + u32 end; + struct vio_net_desc *desc; + struct netdev_queue *txq; + + if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA)) + return 0; + + end = pkt->end_idx; + vp = port->vp; + dev = vp->dev; + netif_tx_lock(dev); + if (unlikely(!idx_is_pending(dr, end))) { + netif_tx_unlock(dev); + return 0; + } + + /* sync for race conditions with vnet_start_xmit() and tell xmit it + * is time to send a trigger. + */ + trace_vnet_rx_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, end); + dr->cons = vio_dring_next(dr, end); + desc = vio_dring_entry(dr, dr->cons); + if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) { + /* vnet_start_xmit() just populated this dring but missed + * sending the "start" LDC message to the consumer. + * Send a "start" trigger on its behalf. 
+ */ + if (__vnet_tx_trigger(port, dr->cons) > 0) + port->start_cons = false; + else + port->start_cons = true; + } else { + port->start_cons = true; + } + netif_tx_unlock(dev); + + txq = netdev_get_tx_queue(dev, port->q_index); + if (unlikely(netif_tx_queue_stopped(txq) && + vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr))) + return 1; + + return 0; +} + +static int vnet_nack(struct vnet_port *port, void *msgbuf) +{ + /* XXX just reset or similar XXX */ + return 0; +} + +static int handle_mcast(struct vnet_port *port, void *msgbuf) +{ + struct vio_net_mcast_info *pkt = msgbuf; + + if (pkt->tag.stype != VIO_SUBTYPE_ACK) + pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", + port->vp->dev->name, + pkt->tag.type, + pkt->tag.stype, + pkt->tag.stype_env, + pkt->tag.sid); + + return 0; +} + +/* Got back a STOPPED LDC message on port. If the queue is stopped, + * wake it up so that we'll send out another START message at the + * next TX. + */ +static void maybe_tx_wakeup(struct vnet_port *port) +{ + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(port->vp->dev, port->q_index); + __netif_tx_lock(txq, smp_processor_id()); + if (likely(netif_tx_queue_stopped(txq))) { + struct vio_dring_state *dr; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + netif_tx_wake_queue(txq); + } + __netif_tx_unlock(txq); +} + +static inline bool port_is_up(struct vnet_port *vnet) +{ + struct vio_driver_state *vio = &vnet->vio; + + return !!(vio->hs_state & VIO_HS_COMPLETE); +} + +static int vnet_event_napi(struct vnet_port *port, int budget) +{ + struct vio_driver_state *vio = &port->vio; + int tx_wakeup, err; + int npkts = 0; + int event = (port->rx_event & LDC_EVENT_RESET); + +ldc_ctrl: + if (unlikely(event == LDC_EVENT_RESET || + event == LDC_EVENT_UP)) { + vio_link_state_change(vio, event); + + if (event == LDC_EVENT_RESET) { + vnet_port_reset(port); + vio_port_up(vio); + } + port->rx_event = 0; + return 0; + } + /* We may have multiple LDC events in rx_event. Unroll send_events() */ + event = (port->rx_event & LDC_EVENT_UP); + port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); + if (event == LDC_EVENT_UP) + goto ldc_ctrl; + event = port->rx_event; + if (!(event & LDC_EVENT_DATA_READY)) + return 0; + + /* we dont expect any other bits than RESET, UP, DATA_READY */ + BUG_ON(event != LDC_EVENT_DATA_READY); + + tx_wakeup = err = 0; + while (1) { + union { + struct vio_msg_tag tag; + u64 raw[8]; + } msgbuf; + + if (port->napi_resume) { + struct vio_dring_data *pkt = + (struct vio_dring_data *)&msgbuf; + struct vio_dring_state *dr = + &port->vio.drings[VIO_DRIVER_RX_RING]; + + pkt->tag.type = VIO_TYPE_DATA; + pkt->tag.stype = VIO_SUBTYPE_INFO; + pkt->tag.stype_env = VIO_DRING_DATA; + pkt->seq = dr->rcv_nxt; + pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); + pkt->end_idx = -1; + goto napi_resume; + } + err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf)); + if (unlikely(err < 0)) { + if (err == -ECONNRESET) + vio_conn_reset(vio); + break; + } + if (err == 0) + break; + viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n", + msgbuf.tag.type, + msgbuf.tag.stype, + msgbuf.tag.stype_env, + msgbuf.tag.sid); + err = vio_validate_sid(vio, &msgbuf.tag); + if (err < 0) + break; +napi_resume: + if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { + if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { + if (!port_is_up(port)) { + /* failures like handshake_failure() + * may have cleaned up dring, but + * NAPI polling may bring us here. 
+ */ + err = -ECONNRESET; + break; + } + err = vnet_rx(port, &msgbuf, &npkts, budget); + if (npkts >= budget) + break; + if (npkts == 0) + break; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) { + err = vnet_ack(port, &msgbuf); + if (err > 0) + tx_wakeup |= err; + } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) { + err = vnet_nack(port, &msgbuf); + } + } else if (msgbuf.tag.type == VIO_TYPE_CTRL) { + if (msgbuf.tag.stype_env == VNET_MCAST_INFO) + err = handle_mcast(port, &msgbuf); + else + err = vio_control_pkt_engine(vio, &msgbuf); + if (err) + break; + } else { + err = vnet_handle_unknown(port, &msgbuf); + } + if (err == -ECONNRESET) + break; + } + if (unlikely(tx_wakeup && err != -ECONNRESET)) + maybe_tx_wakeup(port); + return npkts; +} + +int sunvnet_poll_common(struct napi_struct *napi, int budget) +{ + struct vnet_port *port = container_of(napi, struct vnet_port, napi); + struct vio_driver_state *vio = &port->vio; + int processed = vnet_event_napi(port, budget); + + if (processed < budget) { + napi_complete(napi); + port->rx_event &= ~LDC_EVENT_DATA_READY; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED); + } + return processed; +} +EXPORT_SYMBOL_GPL(sunvnet_poll_common); + +void sunvnet_event_common(void *arg, int event) +{ + struct vnet_port *port = arg; + struct vio_driver_state *vio = &port->vio; + + port->rx_event |= event; + vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); + napi_schedule(&port->napi); + +} +EXPORT_SYMBOL_GPL(sunvnet_event_common); + +static int __vnet_tx_trigger(struct vnet_port *port, u32 start) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct vio_dring_data hdr = { + .tag = { + .type = VIO_TYPE_DATA, + .stype = VIO_SUBTYPE_INFO, + .stype_env = VIO_DRING_DATA, + .sid = vio_send_sid(&port->vio), + }, + .dring_ident = dr->ident, + .start_idx = start, + .end_idx = (u32) -1, + }; + int err, delay; + int retries = 0; + + if (port->stop_rx) { + trace_vnet_tx_pending_stopped_ack(port->vio._local_sid, + port->vio._peer_sid, + port->stop_rx_idx, -1); + err = vnet_send_ack(port, + &port->vio.drings[VIO_DRIVER_RX_RING], + port->stop_rx_idx, -1, + VIO_DRING_STOPPED); + if (err <= 0) + return err; + } + + hdr.seq = dr->snd_nxt; + delay = 1; + do { + err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr)); + if (err > 0) { + dr->snd_nxt++; + break; + } + udelay(delay); + if ((delay <<= 1) > 128) + delay = 128; + if (retries++ > VNET_MAX_RETRIES) + break; + } while (err == -EAGAIN); + trace_vnet_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, start, err); + + return err; +} + +static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) +{ + unsigned int hash = vnet_hashfn(skb->data); + struct hlist_head *hp = &vp->port_hash[hash]; + struct vnet_port *port; + + hlist_for_each_entry_rcu(port, hp, hash) { + if (!port_is_up(port)) + continue; + if (ether_addr_equal(port->raddr, skb->data)) + return port; + } + list_for_each_entry_rcu(port, &vp->port_list, list) { + if (!port->switch_port) + continue; + if (!port_is_up(port)) + continue; + return port; + } + return NULL; +} + +static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, + unsigned *pending) +{ + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *skb = NULL; + int i, txi; + + *pending = 0; + + txi = dr->prod; + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + --txi; + if (txi < 0) + txi = VNET_TX_RING_SIZE-1; + + d = vio_dring_entry(dr, txi); + + if (d->hdr.state == VIO_DESC_READY) { + 
(*pending)++; + continue; + } + if (port->tx_bufs[txi].skb) { + if (d->hdr.state != VIO_DESC_DONE) + pr_notice("invalid ring buffer state %d\n", + d->hdr.state); + BUG_ON(port->tx_bufs[txi].skb->next); + + port->tx_bufs[txi].skb->next = skb; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; + + ldc_unmap(port->vio.lp, + port->tx_bufs[txi].cookies, + port->tx_bufs[txi].ncookies); + } else if (d->hdr.state == VIO_DESC_FREE) + break; + d->hdr.state = VIO_DESC_FREE; + } + return skb; +} + +static inline void vnet_free_skbs(struct sk_buff *skb) +{ + struct sk_buff *next; + + while (skb) { + next = skb->next; + skb->next = NULL; + dev_kfree_skb(skb); + skb = next; + } +} + +void sunvnet_clean_timer_expire_common(unsigned long port0) +{ + struct vnet_port *port = (struct vnet_port *)port0; + struct sk_buff *freeskbs; + unsigned pending; + + netif_tx_lock(port->vp->dev); + freeskbs = vnet_clean_tx_ring(port, &pending); + netif_tx_unlock(port->vp->dev); + + vnet_free_skbs(freeskbs); + + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else + del_timer(&port->clean_timer); +} +EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common); + +static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, + struct ldc_trans_cookie *cookies, int ncookies, + unsigned int map_perm) +{ + int i, nc, err, blen; + + /* header */ + blen = skb_headlen(skb); + if (blen < ETH_ZLEN) + blen = ETH_ZLEN; + blen += VNET_PACKET_SKIP; + blen += 8 - (blen & 7); + + err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, + ncookies, map_perm); + if (err < 0) + return err; + nc = err; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + u8 *vaddr; + + if (nc < ncookies) { + vaddr = kmap_atomic(skb_frag_page(f)); + blen = skb_frag_size(f); + blen += 8 - (blen & 7); + err = ldc_map_single(lp, vaddr + f->page_offset, + blen, cookies + nc, ncookies - nc, + map_perm); + kunmap_atomic(vaddr); + } else { + err = -EMSGSIZE; + } + + if (err < 0) { + ldc_unmap(lp, cookies, nc); + return err; + } + nc += err; + } + return nc; +} + +static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) +{ + struct sk_buff *nskb; + int i, len, pad, docopy; + + len = skb->len; + pad = 0; + if (len < ETH_ZLEN) { + pad += ETH_ZLEN - skb->len; + len += pad; + } + len += VNET_PACKET_SKIP; + pad += 8 - (len & 7); + + /* make sure we have enough cookies and alignment in every frag */ + docopy = skb_shinfo(skb)->nr_frags >= ncookies; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *f = &skb_shinfo(skb)->frags[i]; + + docopy |= f->page_offset & 7; + } + if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP || + skb_tailroom(skb) < pad || + skb_headroom(skb) < VNET_PACKET_SKIP || docopy) { + int start = 0, offset; + __wsum csum; + + len = skb->len > ETH_ZLEN ? 
skb->len : ETH_ZLEN; + nskb = alloc_and_align_skb(skb->dev, len); + if (nskb == NULL) { + dev_kfree_skb(skb); + return NULL; + } + skb_reserve(nskb, VNET_PACKET_SKIP); + + nskb->protocol = skb->protocol; + offset = skb_mac_header(skb) - skb->data; + skb_set_mac_header(nskb, offset); + offset = skb_network_header(skb) - skb->data; + skb_set_network_header(nskb, offset); + offset = skb_transport_header(skb) - skb->data; + skb_set_transport_header(nskb, offset); + + offset = 0; + nskb->csum_offset = skb->csum_offset; + nskb->ip_summed = skb->ip_summed; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + start = skb_checksum_start_offset(skb); + if (start) { + struct iphdr *iph = ip_hdr(nskb); + int offset = start + nskb->csum_offset; + + if (skb_copy_bits(skb, 0, nskb->data, start)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + *(__sum16 *)(skb->data + offset) = 0; + csum = skb_copy_and_csum_bits(skb, start, + nskb->data + start, + skb->len - start, 0); + if (iph->protocol == IPPROTO_TCP || + iph->protocol == IPPROTO_UDP) { + csum = csum_tcpudp_magic(iph->saddr, iph->daddr, + skb->len - start, + iph->protocol, csum); + } + *(__sum16 *)(nskb->data + offset) = csum; + + nskb->ip_summed = CHECKSUM_NONE; + } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) { + dev_kfree_skb(nskb); + dev_kfree_skb(skb); + return NULL; + } + (void)skb_put(nskb, skb->len); + if (skb_is_gso(skb)) { + skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size; + skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type; + } + nskb->queue_mapping = skb->queue_mapping; + dev_kfree_skb(skb); + skb = nskb; + } + return skb; +} + +u16 sunvnet_select_queue_common(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = __tx_port_find(vp, skb); + + if (port == NULL) + return 0; + return port->q_index; +} +EXPORT_SYMBOL_GPL(sunvnet_select_queue_common); + +static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) +{ + struct net_device *dev = port->vp->dev; + struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + struct sk_buff *segs; + int maclen, datalen; + int status; + int gso_size, gso_type, gso_segs; + int hlen = skb_transport_header(skb) - skb_mac_header(skb); + int proto = IPPROTO_IP; + + if (skb->protocol == htons(ETH_P_IP)) + proto = ip_hdr(skb)->protocol; + else if (skb->protocol == htons(ETH_P_IPV6)) + proto = ipv6_hdr(skb)->nexthdr; + + if (proto == IPPROTO_TCP) + hlen += tcp_hdr(skb)->doff * 4; + else if (proto == IPPROTO_UDP) + hlen += sizeof(struct udphdr); + else { + pr_err("vnet_handle_offloads GSO with unknown transport " + "protocol %d tproto %d\n", skb->protocol, proto); + hlen = 128; /* XXX */ + } + datalen = port->tsolen - hlen; + + gso_size = skb_shinfo(skb)->gso_size; + gso_type = skb_shinfo(skb)->gso_type; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (port->tso && gso_size < datalen) + gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen); + + if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, port->q_index); + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs) + return NETDEV_TX_BUSY; + netif_tx_wake_queue(txq); + } + + maclen = skb_network_header(skb) - skb_mac_header(skb); + skb_pull(skb, maclen); + + if (port->tso && gso_size < datalen) { + if (skb_unclone(skb, GFP_ATOMIC)) + goto out_dropped; + + /* segment to TSO size */ + 
skb_shinfo(skb)->gso_size = datalen; + skb_shinfo(skb)->gso_segs = gso_segs; + } + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); + if (IS_ERR(segs)) + goto out_dropped; + + skb_push(skb, maclen); + skb_reset_mac_header(skb); + + status = 0; + while (segs) { + struct sk_buff *curr = segs; + + segs = segs->next; + curr->next = NULL; + if (port->tso && curr->len > dev->mtu) { + skb_shinfo(curr)->gso_size = gso_size; + skb_shinfo(curr)->gso_type = gso_type; + skb_shinfo(curr)->gso_segs = + DIV_ROUND_UP(curr->len - hlen, gso_size); + } else + skb_shinfo(curr)->gso_size = 0; + + skb_push(curr, maclen); + skb_reset_mac_header(curr); + memcpy(skb_mac_header(curr), skb_mac_header(skb), + maclen); + curr->csum_start = skb_transport_header(curr) - curr->head; + if (ip_hdr(curr)->protocol == IPPROTO_TCP) + curr->csum_offset = offsetof(struct tcphdr, check); + else if (ip_hdr(curr)->protocol == IPPROTO_UDP) + curr->csum_offset = offsetof(struct udphdr, check); + + if (!(status & NETDEV_TX_MASK)) + status = sunvnet_start_xmit_common(curr, dev); + if (status & NETDEV_TX_MASK) + dev_kfree_skb_any(curr); + } + + if (!(status & NETDEV_TX_MASK)) + dev_kfree_skb_any(skb); + return status; +out_dropped: + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = NULL; + struct vio_dring_state *dr; + struct vio_net_desc *d; + unsigned int len; + struct sk_buff *freeskbs = NULL; + int i, err, txi; + unsigned pending = 0; + struct netdev_queue *txq; + + rcu_read_lock(); + port = __tx_port_find(vp, skb); + if (unlikely(!port)) { + rcu_read_unlock(); + goto out_dropped; + } + + if (skb_is_gso(skb) && skb->len > port->tsolen) { + err = vnet_handle_offloads(port, skb); + rcu_read_unlock(); + return err; + } + + if (!skb_is_gso(skb) && skb->len > port->rmtu) { + unsigned long localmtu = port->rmtu - ETH_HLEN; + + if (vio_version_after_eq(&port->vio, 1, 3)) + localmtu -= VLAN_HLEN; + + if (skb->protocol == htons(ETH_P_IP)) { + struct flowi4 fl4; + struct rtable *rt = NULL; + + memset(&fl4, 0, sizeof(fl4)); + fl4.flowi4_oif = dev->ifindex; + fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos); + fl4.daddr = ip_hdr(skb)->daddr; + fl4.saddr = ip_hdr(skb)->saddr; + + rt = ip_route_output_key(dev_net(dev), &fl4); + rcu_read_unlock(); + if (!IS_ERR(rt)) { + skb_dst_set(skb, &rt->dst); + icmp_send(skb, ICMP_DEST_UNREACH, + ICMP_FRAG_NEEDED, + htonl(localmtu)); + } + } +#if IS_ENABLED(CONFIG_IPV6) + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu); +#endif + goto out_dropped; + } + + skb = vnet_skb_shape(skb, 2); + + if (unlikely(!skb)) + goto out_dropped; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + vnet_fullcsum(skb); + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + i = skb_get_queue_mapping(skb); + txq = netdev_get_tx_queue(dev, i); + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + if (!netif_tx_queue_stopped(txq)) { + netif_tx_stop_queue(txq); + + /* This is a hard error, log it. */ + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); + dev->stats.tx_errors++; + } + rcu_read_unlock(); + return NETDEV_TX_BUSY; + } + + d = vio_dring_cur(dr); + + txi = dr->prod; + + freeskbs = vnet_clean_tx_ring(port, &pending); + + BUG_ON(port->tx_bufs[txi].skb); + + len = skb->len; + if (len < ETH_ZLEN) + len = ETH_ZLEN; + + err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2, + (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW)); + if (err < 0) { + netdev_info(dev, "tx buffer map error %d\n", err); + goto out_dropped; + } + + port->tx_bufs[txi].skb = skb; + skb = NULL; + port->tx_bufs[txi].ncookies = err; + + /* We don't rely on the ACKs to free the skb in vnet_start_xmit(), + * thus it is safe to not set VIO_ACK_ENABLE for each transmission: + * the protocol itself does not require it as long as the peer + * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED. + * + * An ACK for every packet in the ring is expensive as the + * sending of LDC messages is slow and affects performance. + */ + d->hdr.ack = VIO_ACK_DISABLE; + d->size = len; + d->ncookies = port->tx_bufs[txi].ncookies; + for (i = 0; i < d->ncookies; i++) + d->cookies[i] = port->tx_bufs[txi].cookies[i]; + if (vio_version_after_eq(&port->vio, 1, 7)) { + struct vio_net_dext *dext = vio_net_ext(d); + + memset(dext, 0, sizeof(*dext)); + if (skb_is_gso(port->tx_bufs[txi].skb)) { + dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb) + ->gso_size; + dext->flags |= VNET_PKT_IPV4_LSO; + } + if (vio_version_after_eq(&port->vio, 1, 8) && + !port->switch_port) { + dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK; + dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK; + } + } + + /* This has to be a non-SMP write barrier because we are writing + * to memory which is shared with the peer LDOM. + */ + dma_wmb(); + + d->hdr.state = VIO_DESC_READY; + + /* Exactly one ldc "start" trigger (for dr->cons) needs to be sent + * to notify the consumer that some descriptors are READY. + * After that "start" trigger, no additional triggers are needed until + * a DRING_STOPPED is received from the consumer. The dr->cons field + * (set up by vnet_ack()) has the value of the next dring index + * that has not yet been ack-ed. We send a "start" trigger here + * if, and only if, start_cons is true (reset it afterward). Conversely, + * vnet_ack() should check if the dring corresponding to cons + * is marked READY, but start_cons was false. + * If so, vnet_ack() should send out the missed "start" trigger. + * + * Note that the dma_wmb() above makes sure the cookies et al. are + * not globally visible before the VIO_DESC_READY, and that the + * stores are ordered correctly by the compiler. The consumer will + * not proceed until the VIO_DESC_READY is visible assuring that + * the consumer does not observe anything related to descriptors + * out of order. 
The HV trap from the LDC start trigger is the + * producer to consumer announcement that work is available to the + * consumer + */ + if (!port->start_cons) { /* previous trigger suffices */ + trace_vnet_skip_tx_trigger(port->vio._local_sid, + port->vio._peer_sid, dr->cons); + goto ldc_start_done; + } + + err = __vnet_tx_trigger(port, dr->cons); + if (unlikely(err < 0)) { + netdev_info(dev, "TX trigger error %d\n", err); + d->hdr.state = VIO_DESC_FREE; + skb = port->tx_bufs[txi].skb; + port->tx_bufs[txi].skb = NULL; + dev->stats.tx_carrier_errors++; + goto out_dropped; + } + +ldc_start_done: + port->start_cons = false; + + dev->stats.tx_packets++; + dev->stats.tx_bytes += port->tx_bufs[txi].skb->len; + + dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); + if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + netif_tx_stop_queue(txq); + if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) + netif_tx_wake_queue(txq); + } + + (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT); + rcu_read_unlock(); + + vnet_free_skbs(freeskbs); + + return NETDEV_TX_OK; + +out_dropped: + if (pending) + (void)mod_timer(&port->clean_timer, + jiffies + VNET_CLEAN_TIMEOUT); + else if (port) + del_timer(&port->clean_timer); + if (port) + rcu_read_unlock(); + if (skb) + dev_kfree_skb(skb); + vnet_free_skbs(freeskbs); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; +} +EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common); + +void sunvnet_tx_timeout_common(struct net_device *dev) +{ + /* XXX Implement me XXX */ +} +EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common); + +int sunvnet_open_common(struct net_device *dev) +{ + netif_carrier_on(dev); + netif_tx_start_all_queues(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(sunvnet_open_common); + +int sunvnet_close_common(struct net_device *dev) +{ + netif_tx_stop_all_queues(dev); + netif_carrier_off(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(sunvnet_close_common); + +static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr) +{ + struct vnet_mcast_entry *m; + + for (m = vp->mcast_list; m; m = m->next) { + if (ether_addr_equal(m->addr, addr)) + return m; + } + return NULL; +} + +static void __update_mc_list(struct vnet *vp, struct net_device *dev) +{ + struct netdev_hw_addr *ha; + + netdev_for_each_mc_addr(ha, dev) { + struct vnet_mcast_entry *m; + + m = __vnet_mc_find(vp, ha->addr); + if (m) { + m->hit = 1; + continue; + } + + if (!m) { + m = kzalloc(sizeof(*m), GFP_ATOMIC); + if (!m) + continue; + memcpy(m->addr, ha->addr, ETH_ALEN); + m->hit = 1; + + m->next = vp->mcast_list; + vp->mcast_list = m; + } + } +} + +static void __send_mc_list(struct vnet *vp, struct vnet_port *port) +{ + struct vio_net_mcast_info info; + struct vnet_mcast_entry *m, **pp; + int n_addrs; + + memset(&info, 0, sizeof(info)); + + info.tag.type = VIO_TYPE_CTRL; + info.tag.stype = VIO_SUBTYPE_INFO; + info.tag.stype_env = VNET_MCAST_INFO; + info.tag.sid = vio_send_sid(&port->vio); + info.set = 1; + + n_addrs = 0; + for (m = vp->mcast_list; m; m = m->next) { + if (m->sent) + continue; + m->sent = 1; + memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } + + info.set = 0; + + n_addrs = 0; + pp = &vp->mcast_list; + while ((m = *pp) != NULL) { + if (m->hit) { + m->hit = 0; + pp = &m->next; + continue; + } + + 
memcpy(&info.mcast_addr[n_addrs * ETH_ALEN], + m->addr, ETH_ALEN); + if (++n_addrs == VNET_NUM_MCAST) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, + sizeof(info)); + n_addrs = 0; + } + + *pp = m->next; + kfree(m); + } + if (n_addrs) { + info.count = n_addrs; + (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + } +} + +void sunvnet_set_rx_mode_common(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + + rcu_read_lock(); + list_for_each_entry_rcu(port, &vp->port_list, list) { + + if (port->switch_port) { + __update_mc_list(vp, dev); + __send_mc_list(vp, port); + break; + } + } + rcu_read_unlock(); +} +EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common); + +int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu) +{ + if (new_mtu < 68 || new_mtu > 65535) + return -EINVAL; + + dev->mtu = new_mtu; + return 0; +} +EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common); + +int sunvnet_set_mac_addr_common(struct net_device *dev, void *p) +{ + return -EINVAL; +} +EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common); + +void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) +{ + struct vio_dring_state *dr; + int i; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + if (dr->base == NULL) + return; + + for (i = 0; i < VNET_TX_RING_SIZE; i++) { + struct vio_net_desc *d; + void *skb = port->tx_bufs[i].skb; + + if (!skb) + continue; + + d = vio_dring_entry(dr, i); + + ldc_unmap(port->vio.lp, + port->tx_bufs[i].cookies, + port->tx_bufs[i].ncookies); + dev_kfree_skb(skb); + port->tx_bufs[i].skb = NULL; + d->hdr.state = VIO_DESC_FREE; + } + ldc_free_exp_dring(port->vio.lp, dr->base, + (dr->entry_size * dr->num_entries), + dr->cookies, dr->ncookies); + dr->base = NULL; + dr->entry_size = 0; + dr->num_entries = 0; + dr->pending = 0; + dr->ncookies = 0; +} +EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common); + +static void vnet_port_reset(struct vnet_port *port) +{ + del_timer(&port->clean_timer); + sunvnet_port_free_tx_bufs_common(port); + port->rmtu = 0; + port->tso = true; + port->tsolen = 0; +} + +static int vnet_port_alloc_tx_ring(struct vnet_port *port) +{ + struct vio_dring_state *dr; + unsigned long len, elen; + int i, err, ncookies; + void *dring; + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; + + elen = sizeof(struct vio_net_desc) + + sizeof(struct ldc_trans_cookie) * 2; + if (vio_version_after_eq(&port->vio, 1, 7)) + elen += sizeof(struct vio_net_dext); + len = VNET_TX_RING_SIZE * elen; + + ncookies = VIO_MAX_RING_COOKIES; + dring = ldc_alloc_exp_dring(port->vio.lp, len, + dr->cookies, &ncookies, + (LDC_MAP_SHADOW | + LDC_MAP_DIRECT | + LDC_MAP_RW)); + if (IS_ERR(dring)) { + err = PTR_ERR(dring); + goto err_out; + } + + dr->base = dring; + dr->entry_size = elen; + dr->num_entries = VNET_TX_RING_SIZE; + dr->prod = dr->cons = 0; + port->start_cons = true; /* need an initial trigger */ + dr->pending = VNET_TX_RING_SIZE; + dr->ncookies = ncookies; + + for (i = 0; i < VNET_TX_RING_SIZE; ++i) { + struct vio_net_desc *d; + + d = vio_dring_entry(dr, i); + d->hdr.state = VIO_DESC_FREE; + } + return 0; + +err_out: + sunvnet_port_free_tx_bufs_common(port); + + return err; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +void sunvnet_poll_controller_common(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port; + unsigned long flags; + + spin_lock_irqsave(&vp->lock, flags); + if (!list_empty(&vp->port_list)) { + port = list_entry(vp->port_list.next, struct vnet_port, list); + napi_schedule(&port->napi); + } + 
spin_unlock_irqrestore(&vp->lock, flags); +} +EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common); +#endif + +void sunvnet_port_add_txq_common(struct vnet_port *port) +{ + struct vnet *vp = port->vp; + int n; + + n = vp->nports++; + n = n & (VNET_MAX_TXQS - 1); + port->q_index = n; + netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); +} +EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); + +void sunvnet_port_rm_txq_common(struct vnet_port *port) +{ + port->vp->nports--; + netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); +} +EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h new file mode 100644 index 000000000000..c29c5269703d --- /dev/null +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -0,0 +1,133 @@ +#ifndef _SUNVNETCOMMON_H +#define _SUNVNETCOMMON_H + +#include + +/* length of time (or less) we expect pending descriptors to be marked + * as VIO_DESC_DONE and skbs ready to be freed + */ +#define VNET_CLEAN_TIMEOUT ((HZ / 100) + 1) + +#define VNET_MAXPACKET (65535ULL + ETH_HLEN + VLAN_HLEN) +#define VNET_TX_RING_SIZE 512 +#define VNET_TX_WAKEUP_THRESH(dr) ((dr)->pending / 4) + +#define VNET_MINTSO 2048 /* VIO protocol's minimum TSO len */ +#define VNET_MAXTSO 65535 /* VIO protocol's maximum TSO len */ + +/* VNET packets are sent in buffers with the first 6 bytes skipped + * so that after the ethernet header the IPv4/IPv6 headers are aligned + * properly. + */ +#define VNET_PACKET_SKIP 6 + +#define VNET_MAXCOOKIES (VNET_MAXPACKET / PAGE_SIZE + 1) + +#define VNET_MAX_TXQS 16 + +struct vnet_tx_entry { + struct sk_buff *skb; + unsigned int ncookies; + struct ldc_trans_cookie cookies[VNET_MAXCOOKIES]; +}; + +struct vnet; +struct vnet_port { + struct vio_driver_state vio; + + struct hlist_node hash; + u8 raddr[ETH_ALEN]; + unsigned switch_port:1; + unsigned tso:1; + unsigned __pad:14; + + struct vnet *vp; + + struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; + + struct list_head list; + + u32 stop_rx_idx; + bool stop_rx; + bool start_cons; + + struct timer_list clean_timer; + + u64 rmtu; + u16 tsolen; + + struct napi_struct napi; + u32 napi_stop_idx; + bool napi_resume; + int rx_event; + u16 q_index; +}; + +static inline struct vnet_port *to_vnet_port(struct vio_driver_state *vio) +{ + return container_of(vio, struct vnet_port, vio); +} + +#define VNET_PORT_HASH_SIZE 16 +#define VNET_PORT_HASH_MASK (VNET_PORT_HASH_SIZE - 1) + +static inline unsigned int vnet_hashfn(u8 *mac) +{ + unsigned int val = mac[4] ^ mac[5]; + + return val & (VNET_PORT_HASH_MASK); +} + +struct vnet_mcast_entry { + u8 addr[ETH_ALEN]; + u8 sent; + u8 hit; + struct vnet_mcast_entry *next; +}; + +struct vnet { + /* Protects port_list and port_hash. 
*/ + spinlock_t lock; + + struct net_device *dev; + + u32 msg_enable; + + struct list_head port_list; + + struct hlist_head port_hash[VNET_PORT_HASH_SIZE]; + + struct vnet_mcast_entry *mcast_list; + + struct list_head list; + u64 local_mac; + + int nports; +}; + +/* Common funcs */ +void sunvnet_clean_timer_expire_common(unsigned long port0); +int sunvnet_open_common(struct net_device *dev); +int sunvnet_close_common(struct net_device *dev); +void sunvnet_set_rx_mode_common(struct net_device *dev); +int sunvnet_set_mac_addr_common(struct net_device *dev, void *p); +void sunvnet_tx_timeout_common(struct net_device *dev); +int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu); +int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev); +u16 sunvnet_select_queue_common(struct net_device *dev, + struct sk_buff *skb, + void *accel_priv, + select_queue_fallback_t fallback); +#ifdef CONFIG_NET_POLL_CONTROLLER +void sunvnet_poll_controller_common(struct net_device *dev); +#endif +void sunvnet_event_common(void *arg, int event); +int sunvnet_send_attr_common(struct vio_driver_state *vio); +int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg); +void sunvnet_handshake_complete_common(struct vio_driver_state *vio); +int sunvnet_poll_common(struct napi_struct *napi, int budget); +void sunvnet_port_free_tx_bufs_common(struct vnet_port *port); +void sunvnet_port_add_txq_common(struct vnet_port *port); +void sunvnet_port_rm_txq_common(struct vnet_port *port); + +#endif /* _SUNVNETCOMMON_H */ -- cgit v1.2.3 From 67d0719f06ded9488311472b3d65ad37d992c332 Mon Sep 17 00:00:00 2001 From: Aaron Young Date: Tue, 15 Mar 2016 11:35:38 -0700 Subject: ldmvsw: Make sunvnet_common compatible with ldmvsw Modify sunvnet common code and data structures to be compatible with both sunvnet and ldmvsw drivers. Details: Sunvnet operates on "vnet-port" nodes which appear in the Machine Description (MD) in a guest domain. Ldmvsw operates on "vsw-port" nodes which appear in the MD of a service domain. A difference between the sunvnet driver and the ldmvsw driver is that the sunvnet driver creates a network interface (i.e. a struct net_device) for every vnet-port *parent* "network" node. Several vnet-ports may appear under this common parent network node - each corresponding to a common parent network interface. Conversely, since bridge/vswitch software will need to interface with every vsw-port in a system, the ldmvsw driver creates a network interface (i.e. a struct net_device) for every vsw-port - not every parent node as with sunvnet. This difference required some special handling in the common code as explained below. There are 2 key data structures used by the sunvnet and ldmvsw drivers (which are now found in sunvnet_common.h): 1. struct vnet_port This structure represents a vnet-port node in sunvnet and a vsw-port in the ldmvsw driver. 2. struct vnet This structure represents a parent "network" node in sunvnet and a parent "virtual-network-switch" node in ldmvsw. Since the sunvnet driver allocates a net_device for every parent "network" node, a net_device member appears in the struct vnet. Since the ldmvsw driver allocates a net_device for every port, a net_device member was added to the vnet_port. The common code distinguishes which structure net_device member to use by checking a 'vsw' bit that was added to the vnet_port structure. See the VNET_PORT_TO_NET_DEVICE() macro in sunvnet_common.h. The netdev_priv() in sunvnet is allocated as a vnet.
The netdev_priv() in ldmvsw is a vnet_port. Therefore, any place in the common code where a netdev_priv() call was made, a wrapper function was implemented in each driver to first get the vnet and/or vnet_port (in a driver specific way) and pass them as newly added parameters to the common functions (see wrapper funcs: vnet_set_rx_mode() and vnet_poll_controller()). Since these wrapper functions call __tx_port_find(), __tx_port_find() was moved from the common code back into sunvnet.c. Note - ldmvsw.c does not require this function. These changes also required that port_is_up() be made into a common function and thus it was given a _common suffix and exported like the other common functions. A wrapper function was also added for vnet_start_xmit_common() to pass a driver-specific function arg to return the port associated with a given struct sk_buff and struct net_device. This was required because vnet_start_xmit_common() grabs a lock prior to getting the associated port. Using a function pointer arg allowed the code to work unchanged without risking changes to the non-trivial locking logic in vnet_start_xmit_common(). Signed-off-by: Aaron Young Signed-off-by: Rashmi Narasimhan Reviewed-by: Sowmini Varadhan Reviewed-by: Alexandre Chartre Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet.c | 74 +++++++++++++++++++++++-- drivers/net/ethernet/sun/sunvnet_common.c | 89 +++++++++++-------------------- drivers/net/ethernet/sun/sunvnet_common.h | 28 +++++++--- 3 files changed, 121 insertions(+), 70 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 5b9113524188..98c5f1612681 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -1,6 +1,7 @@ /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller + * Copyright (C) 2016 Oracle. All rights reserved. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -84,18 +85,83 @@ static const struct ethtool_ops vnet_ethtool_ops = { static LIST_HEAD(vnet_list); static DEFINE_MUTEX(vnet_list_mutex); +static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) +{ + unsigned int hash = vnet_hashfn(skb->data); + struct hlist_head *hp = &vp->port_hash[hash]; + struct vnet_port *port; + + hlist_for_each_entry_rcu(port, hp, hash) { + if (!sunvnet_port_is_up_common(port)) + continue; + if (ether_addr_equal(port->raddr, skb->data)) + return port; + } + list_for_each_entry_rcu(port, &vp->port_list, list) { + if (!port->switch_port) + continue; + if (!sunvnet_port_is_up_common(port)) + continue; + return port; + } + return NULL; +} + +/* func arg to vnet_start_xmit_common() to get the proper tx port */ +static struct vnet_port *vnet_tx_port_find(struct sk_buff *skb, + struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + + return __tx_port_find(vp, skb); +} + +static u16 vnet_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet *vp = netdev_priv(dev); + struct vnet_port *port = __tx_port_find(vp, skb); + + if (!port) + return 0; + + return port->q_index; +} + +/* Wrappers to common functions */ +static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + return sunvnet_start_xmit_common(skb, dev, vnet_tx_port_find); +} + +static void vnet_set_rx_mode(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + + return sunvnet_set_rx_mode_common(dev, vp); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vnet_poll_controller(struct net_device *dev) +{ + struct vnet *vp = netdev_priv(dev); + + return sunvnet_poll_controller_common(dev, vp); +} +#endif + static const struct net_device_ops vnet_ops = { .ndo_open = sunvnet_open_common, .ndo_stop = sunvnet_close_common, - .ndo_set_rx_mode = sunvnet_set_rx_mode_common, + .ndo_set_rx_mode = vnet_set_rx_mode, .ndo_set_mac_address = sunvnet_set_mac_addr_common, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = sunvnet_tx_timeout_common, .ndo_change_mtu = sunvnet_change_mtu_common, - .ndo_start_xmit = sunvnet_start_xmit_common, - .ndo_select_queue = sunvnet_select_queue_common, + .ndo_start_xmit = vnet_start_xmit, + .ndo_select_queue = vnet_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER - .ndo_poll_controller = sunvnet_poll_controller_common, + .ndo_poll_controller = vnet_poll_controller, #endif }; diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 49e85d0d960b..083f41c93933 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -1,6 +1,7 @@ /* sunvnet.c: Sun LDOM Virtual Network Driver. * * Copyright (C) 2007, 2008 David S. Miller + * Copyright (C) 2016 Oracle. All rights reserved. 
*/ #include @@ -62,7 +63,7 @@ static int vnet_port_alloc_tx_ring(struct vnet_port *port); int sunvnet_send_attr_common(struct vio_driver_state *vio) { struct vnet_port *port = to_vnet_port(vio); - struct net_device *dev = port->vp->dev; + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_net_attr_info pkt; int framelen = ETH_FRAME_LEN; int i, err; @@ -330,7 +331,7 @@ static inline void vnet_fullcsum(struct sk_buff *skb) static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) { - struct net_device *dev = port->vp->dev; + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); unsigned int len = desc->size; unsigned int copy_len; struct sk_buff *skb; @@ -633,7 +634,6 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct vio_dring_data *pkt = msgbuf; struct net_device *dev; - struct vnet *vp; u32 end; struct vio_net_desc *desc; struct netdev_queue *txq; @@ -642,8 +642,7 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf) return 0; end = pkt->end_idx; - vp = port->vp; - dev = vp->dev; + dev = VNET_PORT_TO_NET_DEVICE(port); netif_tx_lock(dev); if (unlikely(!idx_is_pending(dr, end))) { netif_tx_unlock(dev); @@ -688,10 +687,11 @@ static int vnet_nack(struct vnet_port *port, void *msgbuf) static int handle_mcast(struct vnet_port *port, void *msgbuf) { struct vio_net_mcast_info *pkt = msgbuf; + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); if (pkt->tag.stype != VIO_SUBTYPE_ACK) pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n", - port->vp->dev->name, + dev->name, pkt->tag.type, pkt->tag.stype, pkt->tag.stype_env, @@ -708,7 +708,8 @@ static void maybe_tx_wakeup(struct vnet_port *port) { struct netdev_queue *txq; - txq = netdev_get_tx_queue(port->vp->dev, port->q_index); + txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), + port->q_index); __netif_tx_lock(txq, smp_processor_id()); if (likely(netif_tx_queue_stopped(txq))) { struct vio_dring_state *dr; @@ -719,12 +720,13 @@ static void maybe_tx_wakeup(struct vnet_port *port) __netif_tx_unlock(txq); } -static inline bool port_is_up(struct vnet_port *vnet) +bool sunvnet_port_is_up_common(struct vnet_port *vnet) { struct vio_driver_state *vio = &vnet->vio; return !!(vio->hs_state & VIO_HS_COMPLETE); } +EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common); static int vnet_event_napi(struct vnet_port *port, int budget) { @@ -797,7 +799,7 @@ ldc_ctrl: napi_resume: if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) { if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) { - if (!port_is_up(port)) { + if (!sunvnet_port_is_up_common(port)) { /* failures like handshake_failure() * may have cleaned up dring, but * NAPI polling may bring us here. 
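To make the VNET_PORT_TO_NET_DEVICE() dispatch used in the hunks above concrete: a port either carries its own net_device (the ldmvsw case, vsw bit set) or borrows the one owned by its parent vnet (the sunvnet case). The stand-alone C sketch below models that lookup; the struct and macro names are simplified stand-ins for illustration, not the kernel definitions.

#include <stdio.h>

/* Minimal user-space model of the shared-port scheme: both drivers hand the
 * common code the same port structure, and a vsw flag says where the owning
 * network device lives.
 */
struct model_net_device { const char *name; };

struct model_vnet { struct model_net_device *dev; };  /* sunvnet: one device per parent node */

struct model_vnet_port {
        unsigned vsw:1;                    /* set by ldmvsw, clear for sunvnet */
        struct model_net_device *dev;      /* valid when vsw == 1 */
        struct model_vnet *vp;             /* valid when vsw == 0 */
};

/* Same shape as VNET_PORT_TO_NET_DEVICE() in sunvnet_common.h */
#define MODEL_PORT_TO_NET_DEVICE(p) ((p)->vsw ? (p)->dev : (p)->vp->dev)

int main(void)
{
        struct model_net_device eth = { "eth0" };
        struct model_net_device vif = { "vif0.1" };
        struct model_vnet parent = { &eth };
        struct model_vnet_port sunvnet_port = { .vsw = 0, .vp = &parent };
        struct model_vnet_port ldmvsw_port = { .vsw = 1, .dev = &vif };

        printf("%s\n", MODEL_PORT_TO_NET_DEVICE(&sunvnet_port)->name); /* prints eth0 */
        printf("%s\n", MODEL_PORT_TO_NET_DEVICE(&ldmvsw_port)->name);  /* prints vif0.1 */
        return 0;
}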
@@ -911,28 +913,6 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) return err; } -static struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb) -{ - unsigned int hash = vnet_hashfn(skb->data); - struct hlist_head *hp = &vp->port_hash[hash]; - struct vnet_port *port; - - hlist_for_each_entry_rcu(port, hp, hash) { - if (!port_is_up(port)) - continue; - if (ether_addr_equal(port->raddr, skb->data)) - return port; - } - list_for_each_entry_rcu(port, &vp->port_list, list) { - if (!port->switch_port) - continue; - if (!port_is_up(port)) - continue; - return port; - } - return NULL; -} - static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, unsigned *pending) { @@ -994,9 +974,9 @@ void sunvnet_clean_timer_expire_common(unsigned long port0) struct sk_buff *freeskbs; unsigned pending; - netif_tx_lock(port->vp->dev); + netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port)); freeskbs = vnet_clean_tx_ring(port, &pending); - netif_tx_unlock(port->vp->dev); + netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port)); vnet_free_skbs(freeskbs); @@ -1140,21 +1120,11 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) return skb; } -u16 sunvnet_select_queue_common(struct net_device *dev, struct sk_buff *skb, - void *accel_priv, select_queue_fallback_t fallback) +static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { - struct vnet *vp = netdev_priv(dev); - struct vnet_port *port = __tx_port_find(vp, skb); - - if (port == NULL) - return 0; - return port->q_index; -} -EXPORT_SYMBOL_GPL(sunvnet_select_queue_common); - -static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) -{ - struct net_device *dev = port->vp->dev; + struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port); struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; struct sk_buff *segs; int maclen, datalen; @@ -1239,7 +1209,8 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb) curr->csum_offset = offsetof(struct udphdr, check); if (!(status & NETDEV_TX_MASK)) - status = sunvnet_start_xmit_common(curr, dev); + status = sunvnet_start_xmit_common(curr, dev, + vnet_tx_port); if (status & NETDEV_TX_MASK) dev_kfree_skb_any(curr); } @@ -1253,9 +1224,10 @@ out_dropped: return NETDEV_TX_OK; } -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev) +int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)) { - struct vnet *vp = netdev_priv(dev); struct vnet_port *port = NULL; struct vio_dring_state *dr; struct vio_net_desc *d; @@ -1266,14 +1238,14 @@ int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev) struct netdev_queue *txq; rcu_read_lock(); - port = __tx_port_find(vp, skb); + port = vnet_tx_port(skb, dev); if (unlikely(!port)) { rcu_read_unlock(); goto out_dropped; } if (skb_is_gso(skb) && skb->len > port->tsolen) { - err = vnet_handle_offloads(port, skb); + err = vnet_handle_offloads(port, skb, vnet_tx_port); rcu_read_unlock(); return err; } @@ -1588,9 +1560,8 @@ static void __send_mc_list(struct vnet *vp, struct vnet_port *port) } } -void sunvnet_set_rx_mode_common(struct net_device *dev) +void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) { - struct vnet *vp = netdev_priv(dev); struct vnet_port *port; rcu_read_lock(); @@ -1717,9 +1688,8 @@ err_out: } #ifdef 
CONFIG_NET_POLL_CONTROLLER -void sunvnet_poll_controller_common(struct net_device *dev) +void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp) { - struct vnet *vp = netdev_priv(dev); struct vnet_port *port; unsigned long flags; @@ -1741,13 +1711,16 @@ void sunvnet_port_add_txq_common(struct vnet_port *port) n = vp->nports++; n = n & (VNET_MAX_TXQS - 1); port->q_index = n; - netif_tx_wake_queue(netdev_get_tx_queue(vp->dev, port->q_index)); + netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), + port->q_index)); + } EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); void sunvnet_port_rm_txq_common(struct vnet_port *port) { port->vp->nports--; - netif_tx_stop_queue(netdev_get_tx_queue(port->vp->dev, port->q_index)); + netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), + port->q_index)); } EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common); diff --git a/drivers/net/ethernet/sun/sunvnet_common.h b/drivers/net/ethernet/sun/sunvnet_common.h index c29c5269703d..bd36528af972 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.h +++ b/drivers/net/ethernet/sun/sunvnet_common.h @@ -32,6 +32,13 @@ struct vnet_tx_entry { }; struct vnet; + +/* Structure to describe a vnet-port or vsw-port in the MD. + * If the vsw bit is set, this structure represents a vswitch + * port, and the net_device can be found from ->dev. If the + * vsw bit is not set, the net_device is available from ->vp->dev. + * See the VNET_PORT_TO_NET_DEVICE macro below. + */ struct vnet_port { struct vio_driver_state vio; @@ -39,9 +46,11 @@ struct vnet_port { u8 raddr[ETH_ALEN]; unsigned switch_port:1; unsigned tso:1; - unsigned __pad:14; + unsigned vsw:1; + unsigned __pad:13; struct vnet *vp; + struct net_device *dev; struct vnet_tx_entry tx_bufs[VNET_TX_RING_SIZE]; @@ -105,21 +114,23 @@ struct vnet { int nports; }; +/* Def used by common code to get the net_device from the proper location */ +#define VNET_PORT_TO_NET_DEVICE(__port) \ + ((__port)->vsw ? 
(__port)->dev : (__port)->vp->dev) + /* Common funcs */ void sunvnet_clean_timer_expire_common(unsigned long port0); int sunvnet_open_common(struct net_device *dev); int sunvnet_close_common(struct net_device *dev); -void sunvnet_set_rx_mode_common(struct net_device *dev); +void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp); int sunvnet_set_mac_addr_common(struct net_device *dev, void *p); void sunvnet_tx_timeout_common(struct net_device *dev); int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu); -int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev); -u16 sunvnet_select_queue_common(struct net_device *dev, - struct sk_buff *skb, - void *accel_priv, - select_queue_fallback_t fallback); +int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev, + struct vnet_port *(*vnet_tx_port) + (struct sk_buff *, struct net_device *)); #ifdef CONFIG_NET_POLL_CONTROLLER -void sunvnet_poll_controller_common(struct net_device *dev); +void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp); #endif void sunvnet_event_common(void *arg, int event); int sunvnet_send_attr_common(struct vio_driver_state *vio); @@ -127,6 +138,7 @@ int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg); void sunvnet_handshake_complete_common(struct vio_driver_state *vio); int sunvnet_poll_common(struct napi_struct *napi, int budget); void sunvnet_port_free_tx_bufs_common(struct vnet_port *port); +bool sunvnet_port_is_up_common(struct vnet_port *vnet); void sunvnet_port_add_txq_common(struct vnet_port *port); void sunvnet_port_rm_txq_common(struct vnet_port *port); -- cgit v1.2.3 From 5d01fa0c6bd84ddf1339a3fadfefecd2c28d472e Mon Sep 17 00:00:00 2001 From: Aaron Young Date: Tue, 15 Mar 2016 11:35:39 -0700 Subject: ldmvsw: Add ldmvsw.c driver code Add ldmvsw.c driver Details: The ldmvsw driver very closely follows the sunvnet.c code and makes use of the sunvnet_common.c code for core functionality. A significant difference between the sunvnet and ldmvsw drivers is that sunvnet creates a network interface for each vnet-port *parent* node in the MD while the ldmvsw driver creates a network interface for every vsw-port node in the Machine Description (MD). Therefore the netdev_priv() for sunvnet is a vnet structure while the netdev_priv() for ldmvsw is a vnet_port structure. Vnet_port structures allocated by ldmvsw have the vsw bit set. When finding the net_device associated with a port, the common code keys off this bit to use either the net_device found in the vnet_port or the net_device in the vnet structure (see the VNET_PORT_TO_NET_DEVICE() macro in sunvnet_common.h). This scheme allows the common code to work with both drivers with minimal changes. Similar to Xen, network interfaces created by the ldmvsw driver will always have a HW Addr (i.e. mac address) of FE:FF:FF:FF:FF:FF and each will be assigned the devname "vif<handle>.<port_id>" - where <handle> and <port_id> are a unique handle/port pair assigned to the associated vsw-port node in the MD. Signed-off-by: Aaron Young Signed-off-by: Rashmi Narasimhan Reviewed-by: Sowmini Varadhan Reviewed-by: Alexandre Chartre Signed-off-by: David S.
Miller --- arch/sparc/configs/sparc64_defconfig | 1 + drivers/net/ethernet/sun/Kconfig | 11 + drivers/net/ethernet/sun/Makefile | 1 + drivers/net/ethernet/sun/ldmvsw.c | 468 +++++++++++++++++++++++++++++++++++ 4 files changed, 481 insertions(+) create mode 100644 drivers/net/ethernet/sun/ldmvsw.c (limited to 'drivers') diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig index 6b68f12f29db..04920ab8e292 100644 --- a/arch/sparc/configs/sparc64_defconfig +++ b/arch/sparc/configs/sparc64_defconfig @@ -102,6 +102,7 @@ CONFIG_SUNLANCE=m CONFIG_HAPPYMEAL=m CONFIG_SUNGEM=m CONFIG_SUNVNET=m +CONFIG_LDMVSW=m CONFIG_NET_PCI=y CONFIG_E1000=m CONFIG_E1000E=m diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig index aa58c11ea6db..a4b40e3015e5 100644 --- a/drivers/net/ethernet/sun/Kconfig +++ b/drivers/net/ethernet/sun/Kconfig @@ -80,6 +80,17 @@ config SUNVNET ---help--- Support for virtual network devices under Sun Logical Domains. +config LDMVSW + tristate "Sun4v LDoms Virtual Switch support" + depends on SUN_LDOMS + ---help--- + Support for virtual switch devices under Sun4v Logical Domains. + This driver adds a network interface for every vsw-port node + found in the machine description of a service domain. + Linux bridge/switch software can use these interfaces for + guest domain network interconnectivity or guest domain + connection to a physical network on a service domain. + config NIU tristate "Sun Neptune 10Gbit Ethernet support" depends on PCI diff --git a/drivers/net/ethernet/sun/Makefile b/drivers/net/ethernet/sun/Makefile index 7b622aa14c5b..37855438b3cb 100644 --- a/drivers/net/ethernet/sun/Makefile +++ b/drivers/net/ethernet/sun/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_SUNGEM) += sungem.o obj-$(CONFIG_CASSINI) += cassini.o obj-$(CONFIG_SUNVNET_COMMON) += sunvnet_common.o obj-$(CONFIG_SUNVNET) += sunvnet.o +obj-$(CONFIG_LDMVSW) += ldmvsw.o obj-$(CONFIG_NIU) += niu.o diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c new file mode 100644 index 000000000000..e15bf84fc6b2 --- /dev/null +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -0,0 +1,468 @@ +/* ldmvsw.c: Sun4v LDOM Virtual Switch Driver. + * + * Copyright (C) 2016 Oracle. All rights reserved. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if defined(CONFIG_IPV6) +#include +#endif + +#include +#include +#include + +#include +#include + +/* This driver makes use of the common code in sunvnet_common.c */ +#include "sunvnet_common.h" + +/* Length of time before we decide the hardware is hung, + * and dev->tx_timeout() should be called to fix the problem. 
+ */ +#define VSW_TX_TIMEOUT (10 * HZ) + +/* Static HW Addr used for the network interfaces representing vsw ports */ +static u8 vsw_port_hwaddr[ETH_ALEN] = {0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; + +#define DRV_MODULE_NAME "ldmvsw" +#define DRV_MODULE_VERSION "1.0" +#define DRV_MODULE_RELDATE "Jan 15, 2016" + +static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +MODULE_AUTHOR("Oracle"); +MODULE_DESCRIPTION("Sun4v LDOM Virtual Switch Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_MODULE_VERSION); + +/* Ordered from largest major to lowest */ +static struct vio_version vsw_versions[] = { + { .major = 1, .minor = 8 }, + { .major = 1, .minor = 7 }, + { .major = 1, .minor = 6 }, + { .major = 1, .minor = 0 }, +}; + +static void vsw_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); +} + +static u32 vsw_get_msglevel(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return port->vp->msg_enable; +} + +static void vsw_set_msglevel(struct net_device *dev, u32 value) +{ + struct vnet_port *port = netdev_priv(dev); + + port->vp->msg_enable = value; +} + +static const struct ethtool_ops vsw_ethtool_ops = { + .get_drvinfo = vsw_get_drvinfo, + .get_msglevel = vsw_get_msglevel, + .set_msglevel = vsw_set_msglevel, + .get_link = ethtool_op_get_link, +}; + +static LIST_HEAD(vnet_list); +static DEFINE_MUTEX(vnet_list_mutex); + +/* func arg to vnet_start_xmit_common() to get the proper tx port */ +static struct vnet_port *vsw_tx_port_find(struct sk_buff *skb, + struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return port; +} + +static u16 vsw_select_queue(struct net_device *dev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +{ + struct vnet_port *port = netdev_priv(dev); + + if (!port) + return 0; + + return port->q_index; +} + +/* Wrappers to common functions */ +static int vsw_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + return sunvnet_start_xmit_common(skb, dev, vsw_tx_port_find); +} + +static void vsw_set_rx_mode(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return sunvnet_set_rx_mode_common(dev, port->vp); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void vsw_poll_controller(struct net_device *dev) +{ + struct vnet_port *port = netdev_priv(dev); + + return sunvnet_poll_controller_common(dev, port->vp); +} +#endif + +static const struct net_device_ops vsw_ops = { + .ndo_open = sunvnet_open_common, + .ndo_stop = sunvnet_close_common, + .ndo_set_rx_mode = vsw_set_rx_mode, + .ndo_set_mac_address = sunvnet_set_mac_addr_common, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = sunvnet_tx_timeout_common, + .ndo_change_mtu = sunvnet_change_mtu_common, + .ndo_start_xmit = vsw_start_xmit, + .ndo_select_queue = vsw_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = vsw_poll_controller, +#endif +}; + +static const char *local_mac_prop = "local-mac-address"; +static const char *cfg_handle_prop = "cfg-handle"; + +static struct vnet *vsw_get_vnet(struct mdesc_handle *hp, + u64 port_node, + u64 *handle) +{ + struct vnet *vp; + struct vnet *iter; + const u64 *local_mac = NULL; + const u64 *cfghandle = NULL; + u64 a; + + /* Get the parent virtual-network-switch macaddr and cfghandle */ + mdesc_for_each_arc(a, hp, port_node, MDESC_ARC_TYPE_BACK) { + u64 target 
= mdesc_arc_target(hp, a); + const char *name; + + name = mdesc_get_property(hp, target, "name", NULL); + if (!name || strcmp(name, "virtual-network-switch")) + continue; + + local_mac = mdesc_get_property(hp, target, + local_mac_prop, NULL); + cfghandle = mdesc_get_property(hp, target, + cfg_handle_prop, NULL); + break; + } + if (!local_mac || !cfghandle) + return ERR_PTR(-ENODEV); + + /* find or create associated vnet */ + vp = NULL; + mutex_lock(&vnet_list_mutex); + list_for_each_entry(iter, &vnet_list, list) { + if (iter->local_mac == *local_mac) { + vp = iter; + break; + } + } + + if (!vp) { + vp = kzalloc(sizeof(*vp), GFP_KERNEL); + if (unlikely(!vp)) { + mutex_unlock(&vnet_list_mutex); + return ERR_PTR(-ENOMEM); + } + + spin_lock_init(&vp->lock); + INIT_LIST_HEAD(&vp->port_list); + INIT_LIST_HEAD(&vp->list); + vp->local_mac = *local_mac; + list_add(&vp->list, &vnet_list); + } + + mutex_unlock(&vnet_list_mutex); + + *handle = (u64)*cfghandle; + + return vp; +} + +static struct net_device *vsw_alloc_netdev(u8 hwaddr[], + struct vio_dev *vdev, + u64 handle, + u64 port_id) +{ + struct net_device *dev; + struct vnet_port *port; + int i; + + dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1); + if (!dev) + return ERR_PTR(-ENOMEM); + dev->needed_headroom = VNET_PACKET_SKIP + 8; + dev->needed_tailroom = 8; + + for (i = 0; i < ETH_ALEN; i++) { + dev->dev_addr[i] = hwaddr[i]; + dev->perm_addr[i] = dev->dev_addr[i]; + } + + sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id); + + dev->netdev_ops = &vsw_ops; + dev->ethtool_ops = &vsw_ethtool_ops; + dev->watchdog_timeo = VSW_TX_TIMEOUT; + + dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | + NETIF_F_HW_CSUM | NETIF_F_SG; + dev->features = dev->hw_features; + + SET_NETDEV_DEV(dev, &vdev->dev); + + return dev; +} + +static struct ldc_channel_config vsw_ldc_cfg = { + .event = sunvnet_event_common, + .mtu = 64, + .mode = LDC_MODE_UNRELIABLE, +}; + +static struct vio_driver_ops vsw_vio_ops = { + .send_attr = sunvnet_send_attr_common, + .handle_attr = sunvnet_handle_attr_common, + .handshake_complete = sunvnet_handshake_complete_common, +}; + +static void print_version(void) +{ + printk_once(KERN_INFO "%s", version); +} + +static const char *remote_macaddr_prop = "remote-mac-address"; +static const char *id_prop = "id"; + +static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) +{ + struct mdesc_handle *hp; + struct vnet_port *port; + unsigned long flags; + struct vnet *vp; + struct net_device *dev; + const u64 *rmac; + int len, i, err; + const u64 *port_id; + u64 handle; + + print_version(); + + hp = mdesc_grab(); + + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); + err = -ENODEV; + if (!rmac) { + pr_err("Port lacks %s property\n", remote_macaddr_prop); + mdesc_release(hp); + return err; + } + + port_id = mdesc_get_property(hp, vdev->mp, id_prop, NULL); + err = -ENODEV; + if (!port_id) { + pr_err("Port lacks %s property\n", id_prop); + mdesc_release(hp); + return err; + } + + /* Get (or create) the vnet associated with this port */ + vp = vsw_get_vnet(hp, vdev->mp, &handle); + if (unlikely(IS_ERR(vp))) { + err = PTR_ERR(vp); + pr_err("Failed to get vnet for vsw-port\n"); + mdesc_release(hp); + return err; + } + + mdesc_release(hp); + + dev = vsw_alloc_netdev(vsw_port_hwaddr, vdev, handle, *port_id); + if (IS_ERR(dev)) { + err = PTR_ERR(dev); + pr_err("Failed to alloc netdev for vsw-port\n"); + return err; + } + + port = netdev_priv(dev); + + INIT_LIST_HEAD(&port->list); + + 
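A side note on the interface naming done in vsw_alloc_netdev() above: the name is simply the parent's cfg-handle and the vsw-port's id joined as vif<handle>.<port_id>. The tiny stand-alone illustration below uses made-up handle and id values for the example:

#include <stdio.h>

int main(void)
{
        unsigned long long handle = 0;   /* would come from the "cfg-handle" MD property */
        unsigned long long port_id = 3;  /* would come from the "id" MD property */
        char name[32];

        /* same format string as vsw_alloc_netdev(): "vif%d.%d" */
        snprintf(name, sizeof(name), "vif%d.%d", (int)handle, (int)port_id);
        printf("%s\n", name);            /* prints vif0.3 */
        return 0;
}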
for (i = 0; i < ETH_ALEN; i++) + port->raddr[i] = (*rmac >> (5 - i) * 8) & 0xff; + + port->vp = vp; + port->dev = dev; + port->switch_port = 1; + port->tso = true; + port->tsolen = 0; + + /* Mark the port as belonging to ldmvsw which directs the + * the common code to use the net_device in the vnet_port + * rather than the net_device in the vnet (which is used + * by sunvnet). This bit is used by the VNET_PORT_TO_NET_DEVICE + * macro. + */ + port->vsw = 1; + + err = vio_driver_init(&port->vio, vdev, VDEV_NETWORK, + vsw_versions, ARRAY_SIZE(vsw_versions), + &vsw_vio_ops, dev->name); + if (err) + goto err_out_free_dev; + + err = vio_ldc_alloc(&port->vio, &vsw_ldc_cfg, port); + if (err) + goto err_out_free_dev; + + dev_set_drvdata(&vdev->dev, port); + + netif_napi_add(dev, &port->napi, sunvnet_poll_common, + NAPI_POLL_WEIGHT); + + spin_lock_irqsave(&vp->lock, flags); + list_add_rcu(&port->list, &vp->port_list); + spin_unlock_irqrestore(&vp->lock, flags); + + setup_timer(&port->clean_timer, sunvnet_clean_timer_expire_common, + (unsigned long)port); + + err = register_netdev(dev); + if (err) { + pr_err("Cannot register net device, aborting\n"); + goto err_out_del_timer; + } + + spin_lock_irqsave(&vp->lock, flags); + sunvnet_port_add_txq_common(port); + spin_unlock_irqrestore(&vp->lock, flags); + + napi_enable(&port->napi); + vio_port_up(&port->vio); + + netdev_info(dev, "LDOM vsw-port %pM\n", dev->dev_addr); + + pr_info("%s: PORT ( remote-mac %pM%s )\n", dev->name, + port->raddr, " switch-port"); + + return 0; + +err_out_del_timer: + del_timer_sync(&port->clean_timer); + list_del_rcu(&port->list); + synchronize_rcu(); + netif_napi_del(&port->napi); + dev_set_drvdata(&vdev->dev, NULL); + vio_ldc_free(&port->vio); + +err_out_free_dev: + free_netdev(dev); + return err; +} + +static int vsw_port_remove(struct vio_dev *vdev) +{ + struct vnet_port *port = dev_get_drvdata(&vdev->dev); + unsigned long flags; + + if (port) { + del_timer_sync(&port->vio.timer); + + napi_disable(&port->napi); + + list_del_rcu(&port->list); + + synchronize_rcu(); + del_timer_sync(&port->clean_timer); + spin_lock_irqsave(&port->vp->lock, flags); + sunvnet_port_rm_txq_common(port); + spin_unlock_irqrestore(&port->vp->lock, flags); + netif_napi_del(&port->napi); + sunvnet_port_free_tx_bufs_common(port); + vio_ldc_free(&port->vio); + + dev_set_drvdata(&vdev->dev, NULL); + + unregister_netdev(port->dev); + free_netdev(port->dev); + } + + return 0; +} + +static void vsw_cleanup(void) +{ + struct vnet *vp; + + /* just need to free up the vnet list */ + mutex_lock(&vnet_list_mutex); + while (!list_empty(&vnet_list)) { + vp = list_first_entry(&vnet_list, struct vnet, list); + list_del(&vp->list); + /* vio_unregister_driver() should have cleaned up port_list */ + if (!list_empty(&vp->port_list)) + pr_err("Ports not removed by VIO subsystem!\n"); + kfree(vp); + } + mutex_unlock(&vnet_list_mutex); +} + +static const struct vio_device_id vsw_port_match[] = { + { + .type = "vsw-port", + }, + {}, +}; +MODULE_DEVICE_TABLE(vio, vsw_port_match); + +static struct vio_driver vsw_port_driver = { + .id_table = vsw_port_match, + .probe = vsw_port_probe, + .remove = vsw_port_remove, + .name = "vsw_port", +}; + +static int __init vsw_init(void) +{ + return vio_register_driver(&vsw_port_driver); +} + +static void __exit vsw_exit(void) +{ + vio_unregister_driver(&vsw_port_driver); + vsw_cleanup(); +} + +module_init(vsw_init); +module_exit(vsw_exit); -- cgit v1.2.3 From dc153f850daba6eb665fbfedd349d09bcfd9bda9 Mon Sep 17 00:00:00 2001 From: Aaron 
Young Date: Tue, 15 Mar 2016 11:35:40 -0700 Subject: ldmvsw: Checkpatch sunvnet.c and sunvnet_common.c Checkpatch updates for sunvnet.c and sunvnet_common.c. Signed-off-by: Aaron Young Signed-off-by: Rashmi Narasimhan Reviewed-by: Sowmini Varadhan Reviewed-by: Alexandre Chartre Signed-off-by: David S. Miller --- drivers/net/ethernet/sun/sunvnet.c | 6 +- drivers/net/ethernet/sun/sunvnet_common.c | 100 ++++++++++++++++-------------- 2 files changed, 56 insertions(+), 50 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 98c5f1612681..a2f9b47de187 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -66,12 +66,14 @@ static void vnet_get_drvinfo(struct net_device *dev, static u32 vnet_get_msglevel(struct net_device *dev) { struct vnet *vp = netdev_priv(dev); + return vp->msg_enable; } static void vnet_set_msglevel(struct net_device *dev, u32 value) { struct vnet *vp = netdev_priv(dev); + vp->msg_enable = value; } @@ -359,7 +361,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) INIT_LIST_HEAD(&port->list); switch_port = 0; - if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL) != NULL) + if (mdesc_get_property(hp, vdev->mp, "switch-port", NULL)) switch_port = 1; port->switch_port = switch_port; port->tso = true; @@ -403,7 +405,6 @@ static int vnet_port_remove(struct vio_dev *vdev) struct vnet_port *port = dev_get_drvdata(&vdev->dev); if (port) { - del_timer_sync(&port->vio.timer); napi_disable(&port->napi); @@ -421,7 +422,6 @@ static int vnet_port_remove(struct vio_dev *vdev) dev_set_drvdata(&vdev->dev, NULL); kfree(port); - } return 0; } diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c index 083f41c93933..904a5a12a85d 100644 --- a/drivers/net/ethernet/sun/sunvnet_common.c +++ b/drivers/net/ethernet/sun/sunvnet_common.c @@ -119,7 +119,6 @@ int sunvnet_send_attr_common(struct vio_driver_state *vio) pkt.ack_freq, pkt.plnk_updt, pkt.options, (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen); - return vio_ldc_send(vio, &pkt, sizeof(pkt)); } EXPORT_SYMBOL_GPL(sunvnet_send_attr_common); @@ -197,24 +196,23 @@ static int handle_attr_info(struct vio_driver_state *vio, pkt->tag.stype = VIO_SUBTYPE_NACK; - (void) vio_ldc_send(vio, pkt, sizeof(*pkt)); + (void)vio_ldc_send(vio, pkt, sizeof(*pkt)); return -ECONNRESET; - } else { - viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " - "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " - "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", - pkt->xfer_mode, pkt->addr_type, - (unsigned long long)pkt->addr, - pkt->ack_freq, pkt->plnk_updt, pkt->options, - (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, - pkt->ipv4_lso_maxlen); - - pkt->tag.stype = VIO_SUBTYPE_ACK; - - return vio_ldc_send(vio, pkt, sizeof(*pkt)); } + viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] " + "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] " + "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n", + pkt->xfer_mode, pkt->addr_type, + (unsigned long long)pkt->addr, + pkt->ack_freq, pkt->plnk_updt, pkt->options, + (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags, + pkt->ipv4_lso_maxlen); + + pkt->tag.stype = VIO_SUBTYPE_ACK; + + return vio_ldc_send(vio, pkt, sizeof(*pkt)); } static int handle_attr_ack(struct vio_driver_state *vio, @@ -258,10 +256,12 @@ void sunvnet_handshake_complete_common(struct vio_driver_state *vio) struct 
vio_dring_state *dr; dr = &vio->drings[VIO_DRIVER_RX_RING]; - dr->snd_nxt = dr->rcv_nxt = 1; + dr->rcv_nxt = 1; + dr->snd_nxt = 1; dr = &vio->drings[VIO_DRIVER_TX_RING]; - dr->snd_nxt = dr->rcv_nxt = 1; + dr->rcv_nxt = 1; + dr->snd_nxt = 1; } EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); @@ -283,13 +283,14 @@ EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common); static struct sk_buff *alloc_and_align_skb(struct net_device *dev, unsigned int len) { - struct sk_buff *skb = netdev_alloc_skb(dev, len+VNET_PACKET_SKIP+8+8); + struct sk_buff *skb; unsigned long addr, off; + skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8); if (unlikely(!skb)) return NULL; - addr = (unsigned long) skb->data; + addr = (unsigned long)skb->data; off = ((addr + 7UL) & ~7UL) - addr; if (off) skb_reserve(skb, off); @@ -505,7 +506,7 @@ static int vnet_walk_rx_one(struct vnet_port *port, struct vio_driver_state *vio = &port->vio; int err; - BUG_ON(desc == NULL); + BUG_ON(!desc); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -540,13 +541,14 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, int ack_start = -1, ack_end = -1; bool send_ack = true; - end = (end == (u32) -1) ? vio_dring_prev(dr, start) - : vio_dring_next(dr, end); + end = (end == (u32)-1) ? vio_dring_prev(dr, start) + : vio_dring_next(dr, end); viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end); while (start != end) { int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack); + if (err == -ECONNRESET) return err; if (err != 0) @@ -568,8 +570,10 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr, break; } } - if (unlikely(ack_start == -1)) - ack_start = ack_end = vio_dring_prev(dr, start); + if (unlikely(ack_start == -1)) { + ack_end = vio_dring_prev(dr, start); + ack_start = ack_end; + } if (send_ack) { port->napi_resume = false; trace_vnet_tx_send_stopped_ack(port->vio._local_sid, @@ -749,7 +753,7 @@ ldc_ctrl: } /* We may have multiple LDC events in rx_event. 
Unroll send_events() */ event = (port->rx_event & LDC_EVENT_UP); - port->rx_event &= ~(LDC_EVENT_RESET|LDC_EVENT_UP); + port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP); if (event == LDC_EVENT_UP) goto ldc_ctrl; event = port->rx_event; @@ -759,7 +763,8 @@ ldc_ctrl: /* we dont expect any other bits than RESET, UP, DATA_READY */ BUG_ON(event != LDC_EVENT_DATA_READY); - tx_wakeup = err = 0; + err = 0; + tx_wakeup = 0; while (1) { union { struct vio_msg_tag tag; @@ -776,7 +781,8 @@ ldc_ctrl: pkt->tag.stype = VIO_SUBTYPE_INFO; pkt->tag.stype_env = VIO_DRING_DATA; pkt->seq = dr->rcv_nxt; - pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx); + pkt->start_idx = vio_dring_next(dr, + port->napi_stop_idx); pkt->end_idx = -1; goto napi_resume; } @@ -860,7 +866,6 @@ void sunvnet_event_common(void *arg, int event) port->rx_event |= event; vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED); napi_schedule(&port->napi); - } EXPORT_SYMBOL_GPL(sunvnet_event_common); @@ -876,7 +881,7 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start) }, .dring_ident = dr->ident, .start_idx = start, - .end_idx = (u32) -1, + .end_idx = (u32)-1, }; int err, delay; int retries = 0; @@ -928,7 +933,7 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, --txi; if (txi < 0) - txi = VNET_TX_RING_SIZE-1; + txi = VNET_TX_RING_SIZE - 1; d = vio_dring_entry(dr, txi); @@ -949,8 +954,9 @@ static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port, ldc_unmap(port->vio.lp, port->tx_bufs[txi].cookies, port->tx_bufs[txi].ncookies); - } else if (d->hdr.state == VIO_DESC_FREE) + } else if (d->hdr.state == VIO_DESC_FREE) { break; + } d->hdr.state = VIO_DESC_FREE; } return skb; @@ -1001,7 +1007,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb, blen += VNET_PACKET_SKIP; blen += 8 - (blen & 7); - err = ldc_map_single(lp, skb->data-VNET_PACKET_SKIP, blen, cookies, + err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies, ncookies, map_perm); if (err < 0) return err; @@ -1061,7 +1067,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies) len = skb->len > ETH_ZLEN ? 
skb->len : ETH_ZLEN; nskb = alloc_and_align_skb(skb->dev, len); - if (nskb == NULL) { + if (!nskb) { dev_kfree_skb(skb); return NULL; } @@ -1138,11 +1144,11 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, else if (skb->protocol == htons(ETH_P_IPV6)) proto = ipv6_hdr(skb)->nexthdr; - if (proto == IPPROTO_TCP) + if (proto == IPPROTO_TCP) { hlen += tcp_hdr(skb)->doff * 4; - else if (proto == IPPROTO_UDP) + } else if (proto == IPPROTO_UDP) { hlen += sizeof(struct udphdr); - else { + } else { pr_err("vnet_handle_offloads GSO with unknown transport " "protocol %d tproto %d\n", skb->protocol, proto); hlen = 128; /* XXX */ @@ -1195,8 +1201,9 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb, skb_shinfo(curr)->gso_type = gso_type; skb_shinfo(curr)->gso_segs = DIV_ROUND_UP(curr->len - hlen, gso_size); - } else + } else { skb_shinfo(curr)->gso_size = 0; + } skb_push(curr, maclen); skb_reset_mac_header(curr); @@ -1521,14 +1528,14 @@ static void __send_mc_list(struct vnet *vp, struct vnet_port *port) if (++n_addrs == VNET_NUM_MCAST) { info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, - sizeof(info)); + (void)vio_ldc_send(&port->vio, &info, + sizeof(info)); n_addrs = 0; } } if (n_addrs) { info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + (void)vio_ldc_send(&port->vio, &info, sizeof(info)); } info.set = 0; @@ -1546,8 +1553,8 @@ static void __send_mc_list(struct vnet *vp, struct vnet_port *port) m->addr, ETH_ALEN); if (++n_addrs == VNET_NUM_MCAST) { info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, - sizeof(info)); + (void)vio_ldc_send(&port->vio, &info, + sizeof(info)); n_addrs = 0; } @@ -1556,7 +1563,7 @@ static void __send_mc_list(struct vnet *vp, struct vnet_port *port) } if (n_addrs) { info.count = n_addrs; - (void) vio_ldc_send(&port->vio, &info, sizeof(info)); + (void)vio_ldc_send(&port->vio, &info, sizeof(info)); } } @@ -1566,7 +1573,6 @@ void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp) rcu_read_lock(); list_for_each_entry_rcu(port, &vp->port_list, list) { - if (port->switch_port) { __update_mc_list(vp, dev); __send_mc_list(vp, port); @@ -1600,7 +1606,7 @@ void sunvnet_port_free_tx_bufs_common(struct vnet_port *port) dr = &port->vio.drings[VIO_DRIVER_TX_RING]; - if (dr->base == NULL) + if (!dr->base) return; for (i = 0; i < VNET_TX_RING_SIZE; i++) { @@ -1668,7 +1674,8 @@ static int vnet_port_alloc_tx_ring(struct vnet_port *port) dr->base = dring; dr->entry_size = elen; dr->num_entries = VNET_TX_RING_SIZE; - dr->prod = dr->cons = 0; + dr->prod = 0; + dr->cons = 0; port->start_cons = true; /* need an initial trigger */ dr->pending = VNET_TX_RING_SIZE; dr->ncookies = ncookies; @@ -1713,7 +1720,6 @@ void sunvnet_port_add_txq_common(struct vnet_port *port) port->q_index = n; netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port), port->q_index)); - } EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common); -- cgit v1.2.3 From 019ded3aa7c9799fbe6533baeac9aafc7063bd39 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 15 Mar 2016 22:47:14 +0100 Subject: net: mvneta: bm: clarify dependencies MVNETA_BM has a dependency on MVNETA, so we can only select the former if the latter is enabled. 
However, the code dependency is the reverse: The mvneta module can call into the mvneta_bm module, so mvneta cannot be a built-in if mvneta_bm is a module, or we get a link error: drivers/net/built-in.o: In function `mvneta_remove': drivers/net/ethernet/marvell/mvneta.c:4211: undefined reference to `mvneta_bm_pool_destroy' drivers/net/built-in.o: In function `mvneta_bm_update_mtu': drivers/net/ethernet/marvell/mvneta.c:1034: undefined reference to `mvneta_bm_bufs_free' This avoids the problem by further clarifying the dependency so that MVNETA_BM is a silent Kconfig option that gets turned on by the new MVNETA_BM_ENABLE option. This way both the core HWBM module and the MVNETA_BM code are always built-in when needed. Signed-off-by: Arnd Bergmann Fixes: dc35a10f68d3 ("net: mvneta: bm: add support for hardware buffer management") Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/Kconfig | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig index 62d80fddbe34..b5c6d42daa12 100644 --- a/drivers/net/ethernet/marvell/Kconfig +++ b/drivers/net/ethernet/marvell/Kconfig @@ -40,10 +40,9 @@ config MVMDIO This driver is used by the MV643XX_ETH and MVNETA drivers. -config MVNETA_BM +config MVNETA_BM_ENABLE tristate "Marvell Armada 38x/XP network interface BM support" depends on MVNETA - select HWBM ---help--- This driver supports auxiliary block of the network interface units in the Marvell ARMADA XP and ARMADA 38x SoC @@ -67,6 +66,15 @@ config MVNETA driver, which should be used for the older Marvell SoCs (Dove, Orion, Discovery, Kirkwood). +config MVNETA_BM + tristate + default y if MVNETA=y && MVNETA_BM_ENABLE + default MVNETA_BM_ENABLE + select HWBM + help + MVNETA_BM must not be 'm' if MVNETA=y, so this symbol ensures + that all dependencies are met. + config MVPP2 tristate "Marvell Armada 375 network interface support" depends on MACH_ARMADA_375 -- cgit v1.2.3 From 3af0d554c1ce11e9d0953381ff566271f9ab81a9 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 16 Mar 2016 10:45:10 +0300 Subject: ethernet: micrel: fix some error codes There were two issues here: 1) dma_mapping_error() return true/false but we want to return -ENOMEM 2) If dmaengine_prep_slave_sg() failed then "err" wasn't set but presumably that should be -ENOMEM as well. I changed the success path to "return 0;" instead of "return ret;" for clarity. Fixes: 94fe8c683cea ('ks8842: Support DMA when accessed via timberdale') Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller --- drivers/net/ethernet/micrel/ks8842.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c index 09d2e16fd6b0..cb0102dd7f70 100644 --- a/drivers/net/ethernet/micrel/ks8842.c +++ b/drivers/net/ethernet/micrel/ks8842.c @@ -561,8 +561,8 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) sg_init_table(sg, 1); sg_dma_address(sg) = dma_map_single(adapter->dev, ctl->skb->data, DMA_BUFFER_SIZE, DMA_FROM_DEVICE); - err = dma_mapping_error(adapter->dev, sg_dma_address(sg)); - if (unlikely(err)) { + if (dma_mapping_error(adapter->dev, sg_dma_address(sg))) { + err = -ENOMEM; sg_dma_address(sg) = 0; goto out; } @@ -572,8 +572,10 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) ctl->adesc = dmaengine_prep_slave_sg(ctl->chan, sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); - if (!ctl->adesc) + if (!ctl->adesc) { + err = -ENOMEM; goto out; + } ctl->adesc->callback_param = netdev; ctl->adesc->callback = ks8842_dma_rx_cb; @@ -584,7 +586,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev) goto out; } - return err; + return 0; out: if (sg_dma_address(sg)) dma_unmap_single(adapter->dev, sg_dma_address(sg), -- cgit v1.2.3 From 1098cee6e1b11e4fe80b2765eb54d412d4294f03 Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Wed, 16 Mar 2016 09:59:15 +0000 Subject: bonding: remove duplicate set of flag IFF_MULTICAST Remove unnecessary set of flag IFF_MULTICAST, since ether_setup already does this. Signed-off-by: Zhang Shengju Reviewed-by: Nikolay Aleksandrov Signed-off-by: Andy Gospodarek Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b6236ff3dbdd..270b39c8357f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4175,7 +4175,7 @@ void bond_setup(struct net_device *bond_dev) SET_NETDEV_DEVTYPE(bond_dev, &bond_type); /* Initialize the device options */ - bond_dev->flags |= IFF_MASTER|IFF_MULTICAST; + bond_dev->flags |= IFF_MASTER; bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE; bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); -- cgit v1.2.3 From bc0df13887b1147f4e63390d7d6767f7fa64b91b Mon Sep 17 00:00:00 2001 From: Zhang Shengju Date: Wed, 16 Mar 2016 09:59:16 +0000 Subject: team: remove duplicate set of flag IFF_MULTICAST Remove unnecessary set of flag IFF_MULTICAST, since ether_setup already does this. Signed-off-by: Zhang Shengju Acked-by: Jiri Pirko Signed-off-by: David S. 
Miller --- drivers/net/team/team.c | 1 - 1 file changed, 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 2769835f48ca..26c64d2782fa 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2082,7 +2082,6 @@ static void team_setup(struct net_device *dev) dev->netdev_ops = &team_netdev_ops; dev->ethtool_ops = &team_ethtool_ops; dev->destructor = team_destructor; - dev->flags |= IFF_MULTICAST; dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_NO_QUEUE; dev->priv_flags |= IFF_TEAM; -- cgit v1.2.3 From 79d3b59a93ba25f3b2c72eb4099c189d41d30204 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Wed, 16 Mar 2016 18:26:02 +0100 Subject: net: smc911x: convert pxa dma to dmaengine Convert the dma transfers to be dmaengine based, now pxa has a dmaengine slave driver. This makes this driver a bit more PXA agnostic. The driver was only compile tested. The risk is quite small as no current PXA platform I'm aware of is using smc911x driver. Signed-off-by: Robert Jarzmik Tested-by: Fabio Estevam Signed-off-by: David S. Miller --- drivers/net/ethernet/smsc/smc911x.c | 85 ++++++++++++++++++++++++------------- drivers/net/ethernet/smsc/smc911x.h | 63 ++++++++++++--------------- 2 files changed, 82 insertions(+), 66 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index bd64eb982e52..3f5711061432 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -73,6 +73,9 @@ static const char version[] = #include #include +#include +#include + #include #include "smc911x.h" @@ -1174,18 +1177,16 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id) #ifdef SMC_USE_DMA static void -smc911x_tx_dma_irq(int dma, void *data) +smc911x_tx_dma_irq(void *data) { - struct net_device *dev = (struct net_device *)data; - struct smc911x_local *lp = netdev_priv(dev); + struct smc911x_local *lp = data; + struct net_device *dev = lp->netdev; struct sk_buff *skb = lp->current_tx_skb; unsigned long flags; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n"); - /* Clear the DMA interrupt sources */ - SMC_DMA_ACK_IRQ(dev, dma); BUG_ON(skb == NULL); dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE); dev->trans_start = jiffies; @@ -1208,18 +1209,16 @@ smc911x_tx_dma_irq(int dma, void *data) "TX DMA irq completed\n"); } static void -smc911x_rx_dma_irq(int dma, void *data) +smc911x_rx_dma_irq(void *data) { - struct net_device *dev = (struct net_device *)data; - struct smc911x_local *lp = netdev_priv(dev); + struct smc911x_local *lp = data; + struct net_device *dev = lp->netdev; struct sk_buff *skb = lp->current_rx_skb; unsigned long flags; unsigned int pkts; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, dev, "RX DMA irq handler\n"); - /* Clear the DMA interrupt sources */ - SMC_DMA_ACK_IRQ(dev, dma); dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE); BUG_ON(skb == NULL); lp->current_rx_skb = NULL; @@ -1792,6 +1791,9 @@ static int smc911x_probe(struct net_device *dev) unsigned int val, chip_id, revision; const char *version_string; unsigned long irq_flags; + struct dma_slave_config config; + dma_cap_mask_t mask; + struct pxad_param param; DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__); @@ -1963,11 +1965,40 @@ static int smc911x_probe(struct net_device *dev) goto err_out; #ifdef SMC_USE_DMA - lp->rxdma = 
SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq); - lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + param.prio = PXAD_PRIO_LOWEST; + param.drcmr = -1UL; + + lp->rxdma = + dma_request_slave_channel_compat(mask, pxad_filter_fn, + ¶m, &dev->dev, "rx"); + lp->txdma = + dma_request_slave_channel_compat(mask, pxad_filter_fn, + ¶m, &dev->dev, "tx"); lp->rxdma_active = 0; lp->txdma_active = 0; - dev->dma = lp->rxdma; + + memset(&config, 0, sizeof(config)); + config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + config.src_addr = lp->physaddr + RX_DATA_FIFO; + config.dst_addr = lp->physaddr + TX_DATA_FIFO; + config.src_maxburst = 32; + config.dst_maxburst = 32; + retval = dmaengine_slave_config(lp->rxdma, &config); + if (retval) { + dev_err(lp->dev, "dma rx channel configuration failed: %d\n", + retval); + goto err_out; + } + retval = dmaengine_slave_config(lp->txdma, &config); + if (retval) { + dev_err(lp->dev, "dma tx channel configuration failed: %d\n", + retval); + goto err_out; + } #endif retval = register_netdev(dev); @@ -1978,11 +2009,11 @@ static int smc911x_probe(struct net_device *dev) dev->base_addr, dev->irq); #ifdef SMC_USE_DMA - if (lp->rxdma != -1) - pr_cont(" RXDMA %d", lp->rxdma); + if (lp->rxdma) + pr_cont(" RXDMA %p", lp->rxdma); - if (lp->txdma != -1) - pr_cont(" TXDMA %d", lp->txdma); + if (lp->txdma) + pr_cont(" TXDMA %p", lp->txdma); #endif pr_cont("\n"); if (!is_valid_ether_addr(dev->dev_addr)) { @@ -2005,12 +2036,10 @@ static int smc911x_probe(struct net_device *dev) err_out: #ifdef SMC_USE_DMA if (retval) { - if (lp->rxdma != -1) { - SMC_DMA_FREE(dev, lp->rxdma); - } - if (lp->txdma != -1) { - SMC_DMA_FREE(dev, lp->txdma); - } + if (lp->rxdma) + dma_release_channel(lp->rxdma); + if (lp->txdma) + dma_release_channel(lp->txdma); } #endif return retval; @@ -2112,12 +2141,10 @@ static int smc911x_drv_remove(struct platform_device *pdev) #ifdef SMC_USE_DMA { - if (lp->rxdma != -1) { - SMC_DMA_FREE(dev, lp->rxdma); - } - if (lp->txdma != -1) { - SMC_DMA_FREE(dev, lp->txdma); - } + if (lp->rxdma) + dma_release_channel(lp->rxdma); + if (lp->txdma) + dma_release_channel(lp->txdma); } #endif iounmap(lp->base); diff --git a/drivers/net/ethernet/smsc/smc911x.h b/drivers/net/ethernet/smsc/smc911x.h index 04b35f55df97..fa528ea0ea51 100644 --- a/drivers/net/ethernet/smsc/smc911x.h +++ b/drivers/net/ethernet/smsc/smc911x.h @@ -101,8 +101,8 @@ struct smc911x_local { #ifdef SMC_USE_DMA /* DMA needs the physical address of the chip */ u_long physaddr; - int rxdma; - int txdma; + struct dma_chan *rxdma; + struct dma_chan *txdma; int rxdma_active; int txdma_active; struct sk_buff *current_rx_skb; @@ -210,27 +210,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg, #ifdef SMC_USE_PXA_DMA -#include - -/* - * Define the request and free functions - * These are unfortunately architecture specific as no generic allocation - * mechanism exits - */ -#define SMC_DMA_REQUEST(dev, handler) \ - pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev) - -#define SMC_DMA_FREE(dev, dma) \ - pxa_free_dma(dma) - -#define SMC_DMA_ACK_IRQ(dev, dma) \ -{ \ - if (DCSR(dma) & DCSR_BUSERR) { \ - netdev_err(dev, "DMA %d bus error!\n", dma); \ - } \ - DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; \ -} - /* * Use a DMA for RX and TX packets. 
*/ @@ -238,6 +217,8 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg, static dma_addr_t rx_dmabuf, tx_dmabuf; static int rx_dmalen, tx_dmalen; +static void smc911x_rx_dma_irq(void *data); +static void smc911x_tx_dma_irq(void *data); #ifdef SMC_insl #undef SMC_insl @@ -246,8 +227,10 @@ static int rx_dmalen, tx_dmalen; static inline void smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, - int reg, int dma, u_char *buf, int len) + int reg, struct dma_chan *dma, u_char *buf, int len) { + struct dma_async_tx_descriptor *tx; + /* 64 bit alignment is required for memory to memory DMA */ if ((long)buf & 4) { *((u32 *)buf) = SMC_inl(lp, reg); @@ -258,12 +241,14 @@ smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, len *= 4; rx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_FROM_DEVICE); rx_dmalen = len; - DCSR(dma) = DCSR_NODESC; - DTADR(dma) = rx_dmabuf; - DSADR(dma) = physaddr + reg; - DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 | - DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen)); - DCSR(dma) = DCSR_NODESC | DCSR_RUN; + tx = dmaengine_prep_slave_single(dma, rx_dmabuf, rx_dmalen, + DMA_DEV_TO_MEM, 0); + if (tx) { + tx->callback = smc911x_rx_dma_irq; + tx->callback_param = lp; + dmaengine_submit(tx); + dma_async_issue_pending(dma); + } } #endif @@ -274,8 +259,10 @@ smc_pxa_dma_insl(struct smc911x_local *lp, u_long physaddr, static inline void smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr, - int reg, int dma, u_char *buf, int len) + int reg, struct dma_chan *dma, u_char *buf, int len) { + struct dma_async_tx_descriptor *tx; + /* 64 bit alignment is required for memory to memory DMA */ if ((long)buf & 4) { SMC_outl(*((u32 *)buf), lp, reg); @@ -286,12 +273,14 @@ smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr, len *= 4; tx_dmabuf = dma_map_single(lp->dev, buf, len, DMA_TO_DEVICE); tx_dmalen = len; - DCSR(dma) = DCSR_NODESC; - DSADR(dma) = tx_dmabuf; - DTADR(dma) = physaddr + reg; - DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 | - DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen)); - DCSR(dma) = DCSR_NODESC | DCSR_RUN; + tx = dmaengine_prep_slave_single(dma, tx_dmabuf, tx_dmalen, + DMA_DEV_TO_MEM, 0); + if (tx) { + tx->callback = smc911x_tx_dma_irq; + tx->callback_param = lp; + dmaengine_submit(tx); + dma_async_issue_pending(dma); + } } #endif #endif /* SMC_USE_PXA_DMA */ -- cgit v1.2.3 From 20ff55655a93554611fb7790c8a2d29ee4598d24 Mon Sep 17 00:00:00 2001 From: Woojung Huh Date: Wed, 16 Mar 2016 22:10:40 +0000 Subject: lan78xx: handle statistics counter rollover Update to handle statistics counter rollover. Check statistics counter periodically and compensate it when counter value rolls over at max (20 or 32bits). Simple mechanism adjusts monitoring timer to allow USB auto suspend. Signed-off-by: Woojung Huh Signed-off-by: David S. 
Miller --- drivers/net/usb/lan78xx.c | 252 +++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 239 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 705c180163c5..f20890ee03f3 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -36,7 +36,7 @@ #define DRIVER_AUTHOR "WOOJUNG HUH " #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.3" +#define DRIVER_VERSION "1.0.4" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@ -86,6 +86,9 @@ /* default autosuspend delay (mSec)*/ #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000) +/* statistic update interval (mSec) */ +#define STAT_UPDATE_TIMER (1 * 1000) + static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = { "RX FCS Errors", "RX Alignment Errors", @@ -186,6 +189,56 @@ struct lan78xx_statstage { u32 eee_tx_lpi_time; }; +struct lan78xx_statstage64 { + u64 rx_fcs_errors; + u64 rx_alignment_errors; + u64 rx_fragment_errors; + u64 rx_jabber_errors; + u64 rx_undersize_frame_errors; + u64 rx_oversize_frame_errors; + u64 rx_dropped_frames; + u64 rx_unicast_byte_count; + u64 rx_broadcast_byte_count; + u64 rx_multicast_byte_count; + u64 rx_unicast_frames; + u64 rx_broadcast_frames; + u64 rx_multicast_frames; + u64 rx_pause_frames; + u64 rx_64_byte_frames; + u64 rx_65_127_byte_frames; + u64 rx_128_255_byte_frames; + u64 rx_256_511_bytes_frames; + u64 rx_512_1023_byte_frames; + u64 rx_1024_1518_byte_frames; + u64 rx_greater_1518_byte_frames; + u64 eee_rx_lpi_transitions; + u64 eee_rx_lpi_time; + u64 tx_fcs_errors; + u64 tx_excess_deferral_errors; + u64 tx_carrier_errors; + u64 tx_bad_byte_count; + u64 tx_single_collisions; + u64 tx_multiple_collisions; + u64 tx_excessive_collision; + u64 tx_late_collisions; + u64 tx_unicast_byte_count; + u64 tx_broadcast_byte_count; + u64 tx_multicast_byte_count; + u64 tx_unicast_frames; + u64 tx_broadcast_frames; + u64 tx_multicast_frames; + u64 tx_pause_frames; + u64 tx_64_byte_frames; + u64 tx_65_127_byte_frames; + u64 tx_128_255_byte_frames; + u64 tx_256_511_bytes_frames; + u64 tx_512_1023_byte_frames; + u64 tx_1024_1518_byte_frames; + u64 tx_greater_1518_byte_frames; + u64 eee_tx_lpi_transitions; + u64 eee_tx_lpi_time; +}; + struct lan78xx_net; struct lan78xx_priv { @@ -232,6 +285,15 @@ struct usb_context { #define EVENT_DEV_WAKING 6 #define EVENT_DEV_ASLEEP 7 #define EVENT_DEV_OPEN 8 +#define EVENT_STAT_UPDATE 9 + +struct statstage { + struct mutex access_lock; /* for stats access */ + struct lan78xx_statstage saved; + struct lan78xx_statstage rollover_count; + struct lan78xx_statstage rollover_max; + struct lan78xx_statstage64 curr_stat; +}; struct lan78xx_net { struct net_device *net; @@ -272,6 +334,7 @@ struct lan78xx_net { unsigned maxpacket; struct timer_list delay; + struct timer_list stat_monitor; unsigned long data[5]; @@ -284,6 +347,9 @@ struct lan78xx_net { int fc_autoneg; u8 fc_request_control; + + int delta; + struct statstage stats; }; /* use ethtool to change the level for any given device */ @@ -382,6 +448,93 @@ static int lan78xx_read_stats(struct lan78xx_net *dev, return ret; } +#define check_counter_rollover(struct1, dev_stats, member) { \ + if (struct1->member < dev_stats.saved.member) \ + dev_stats.rollover_count.member++; \ + } + +static void lan78xx_check_stat_rollover(struct lan78xx_net *dev, + struct lan78xx_statstage *stats) +{ + check_counter_rollover(stats, dev->stats, rx_fcs_errors); + 
check_counter_rollover(stats, dev->stats, rx_alignment_errors); + check_counter_rollover(stats, dev->stats, rx_fragment_errors); + check_counter_rollover(stats, dev->stats, rx_jabber_errors); + check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors); + check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors); + check_counter_rollover(stats, dev->stats, rx_dropped_frames); + check_counter_rollover(stats, dev->stats, rx_unicast_byte_count); + check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count); + check_counter_rollover(stats, dev->stats, rx_multicast_byte_count); + check_counter_rollover(stats, dev->stats, rx_unicast_frames); + check_counter_rollover(stats, dev->stats, rx_broadcast_frames); + check_counter_rollover(stats, dev->stats, rx_multicast_frames); + check_counter_rollover(stats, dev->stats, rx_pause_frames); + check_counter_rollover(stats, dev->stats, rx_64_byte_frames); + check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames); + check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames); + check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames); + check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames); + check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames); + check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames); + check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions); + check_counter_rollover(stats, dev->stats, eee_rx_lpi_time); + check_counter_rollover(stats, dev->stats, tx_fcs_errors); + check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors); + check_counter_rollover(stats, dev->stats, tx_carrier_errors); + check_counter_rollover(stats, dev->stats, tx_bad_byte_count); + check_counter_rollover(stats, dev->stats, tx_single_collisions); + check_counter_rollover(stats, dev->stats, tx_multiple_collisions); + check_counter_rollover(stats, dev->stats, tx_excessive_collision); + check_counter_rollover(stats, dev->stats, tx_late_collisions); + check_counter_rollover(stats, dev->stats, tx_unicast_byte_count); + check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count); + check_counter_rollover(stats, dev->stats, tx_multicast_byte_count); + check_counter_rollover(stats, dev->stats, tx_unicast_frames); + check_counter_rollover(stats, dev->stats, tx_broadcast_frames); + check_counter_rollover(stats, dev->stats, tx_multicast_frames); + check_counter_rollover(stats, dev->stats, tx_pause_frames); + check_counter_rollover(stats, dev->stats, tx_64_byte_frames); + check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames); + check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames); + check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames); + check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames); + check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames); + check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames); + check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions); + check_counter_rollover(stats, dev->stats, eee_tx_lpi_time); + + memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage)); +} + +static void lan78xx_update_stats(struct lan78xx_net *dev) +{ + u32 *p, *count, *max; + u64 *data; + int i; + struct lan78xx_statstage lan78xx_stats; + + if (usb_autopm_get_interface(dev->intf) < 0) + return; + + p = (u32 *)&lan78xx_stats; + count = (u32 *)&dev->stats.rollover_count; + max = (u32 *)&dev->stats.rollover_max; + data = (u64 *)&dev->stats.curr_stat; + + 
mutex_lock(&dev->stats.access_lock); + + if (lan78xx_read_stats(dev, &lan78xx_stats) > 0) + lan78xx_check_stat_rollover(dev, &lan78xx_stats); + + for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++) + data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1)); + + mutex_unlock(&dev->stats.access_lock); + + usb_autopm_put_interface(dev->intf); +} + /* Loop until the read is completed with timeout called with phy_mutex held */ static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev) { @@ -967,6 +1120,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) return -EIO; phy_mac_interrupt(phydev, 0); + + del_timer(&dev->stat_monitor); } else if (phydev->link && !dev->link_on) { dev->link_on = true; @@ -1007,6 +1162,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); phy_mac_interrupt(phydev, 1); + + if (!timer_pending(&dev->stat_monitor)) { + dev->delta = 1; + mod_timer(&dev->stat_monitor, + jiffies + STAT_UPDATE_TIMER); + } } return ret; @@ -1099,20 +1260,12 @@ static void lan78xx_get_stats(struct net_device *netdev, struct ethtool_stats *stats, u64 *data) { struct lan78xx_net *dev = netdev_priv(netdev); - struct lan78xx_statstage lan78xx_stat; - u32 *p; - int i; - if (usb_autopm_get_interface(dev->intf) < 0) - return; + lan78xx_update_stats(dev); - if (lan78xx_read_stats(dev, &lan78xx_stat) > 0) { - p = (u32 *)&lan78xx_stat; - for (i = 0; i < (sizeof(lan78xx_stat) / (sizeof(u32))); i++) - data[i] = p[i]; - } - - usb_autopm_put_interface(dev->intf); + mutex_lock(&dev->stats.access_lock); + memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat)); + mutex_unlock(&dev->stats.access_lock); } static void lan78xx_get_wol(struct net_device *netdev, @@ -2095,6 +2248,32 @@ static int lan78xx_reset(struct lan78xx_net *dev) return 0; } +static void lan78xx_init_stats(struct lan78xx_net *dev) +{ + u32 *p; + int i; + + /* initialize for stats update + * some counters are 20bits and some are 32bits + */ + p = (u32 *)&dev->stats.rollover_max; + for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++) + p[i] = 0xFFFFF; + + dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF; + dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF; + dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF; + dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF; + dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF; + dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF; + dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF; + dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF; + dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF; + dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF; + + lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE); +} + static int lan78xx_open(struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); @@ -2122,6 +2301,8 @@ static int lan78xx_open(struct net_device *net) } } + lan78xx_init_stats(dev); + set_bit(EVENT_DEV_OPEN, &dev->flags); netif_start_queue(net); @@ -2166,6 +2347,9 @@ int lan78xx_stop(struct net_device *net) { struct lan78xx_net *dev = netdev_priv(net); + if (timer_pending(&dev->stat_monitor)) + del_timer_sync(&dev->stat_monitor); + phy_stop(net->phydev); phy_disconnect(net->phydev); net->phydev = NULL; @@ -2910,6 +3094,13 @@ static void lan78xx_bh(unsigned long param) } if (netif_device_present(dev->net) && netif_running(dev->net)) { + /* reset update timer delta */ + if (timer_pending(&dev->stat_monitor) 
&& (dev->delta != 1)) { + dev->delta = 1; + mod_timer(&dev->stat_monitor, + jiffies + STAT_UPDATE_TIMER); + } + if (!skb_queue_empty(&dev->txq_pend)) lan78xx_tx_bh(dev); @@ -2984,6 +3175,17 @@ skip_reset: usb_autopm_put_interface(dev->intf); } } + + if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) { + lan78xx_update_stats(dev); + + clear_bit(EVENT_STAT_UPDATE, &dev->flags); + + mod_timer(&dev->stat_monitor, + jiffies + (STAT_UPDATE_TIMER * dev->delta)); + + dev->delta = min((dev->delta * 2), 50); + } } static void intr_complete(struct urb *urb) @@ -3074,6 +3276,15 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, }; +static void lan78xx_stat_monitor(unsigned long param) +{ + struct lan78xx_net *dev; + + dev = (struct lan78xx_net *)param; + + lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE); +} + static int lan78xx_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -3120,6 +3331,13 @@ static int lan78xx_probe(struct usb_interface *intf, netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES; netdev->ethtool_ops = &lan78xx_ethtool_ops; + dev->stat_monitor.function = lan78xx_stat_monitor; + dev->stat_monitor.data = (unsigned long)dev; + dev->delta = 1; + init_timer(&dev->stat_monitor); + + mutex_init(&dev->stats.access_lock); + ret = lan78xx_bind(dev, intf); if (ret < 0) goto out2; @@ -3397,6 +3615,8 @@ int lan78xx_suspend(struct usb_interface *intf, pm_message_t message) } if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) { + del_timer(&dev->stat_monitor); + if (PMSG_IS_AUTO(message)) { /* auto suspend (selective suspend) */ ret = lan78xx_read_reg(dev, MAC_TX, &buf); @@ -3457,6 +3677,12 @@ int lan78xx_resume(struct usb_interface *intf) int ret; u32 buf; + if (!timer_pending(&dev->stat_monitor)) { + dev->delta = 1; + mod_timer(&dev->stat_monitor, + jiffies + STAT_UPDATE_TIMER); + } + if (!--dev->suspend_count) { /* resume interrupt URBs */ if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags)) -- cgit v1.2.3 From a59f8c5b048dc938fb958c91c282c865cd845705 Mon Sep 17 00:00:00 2001 From: Woojung Huh Date: Wed, 16 Mar 2016 22:10:42 +0000 Subject: lan78xx: add ndo_get_stats64 Add lan78xx_get_stats64 of ndo_get_stats64 to report all statistics counters including errors from HW statistics. Read from HW when auto suspend is disabled, use saved counter when auto suspend is enabled because periodic call to ndo_get_stats64 prevents USB auto suspend. Signed-off-by: Woojung Huh Signed-off-by: David S. Miller --- drivers/net/usb/lan78xx.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) (limited to 'drivers') diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index f20890ee03f3..d36d5ebf37f3 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -3261,6 +3261,54 @@ void lan78xx_tx_timeout(struct net_device *net) tasklet_schedule(&dev->bh); } +struct rtnl_link_stats64 *lan78xx_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *storage) +{ + struct lan78xx_net *dev = netdev_priv(netdev); + struct lan78xx_statstage64 stats; + + /* curr_stat is updated by timer. + * periodic reading from HW will prevent from entering USB auto suspend. + * if autosuspend is disabled, read from HW. 
+ */ + if (!dev->udev->dev.power.runtime_auto) + lan78xx_update_stats(dev); + + mutex_lock(&dev->stats.access_lock); + memcpy(&stats, &dev->stats.curr_stat, sizeof(stats)); + mutex_unlock(&dev->stats.access_lock); + + /* calc by driver */ + storage->rx_packets = (__u64)netdev->stats.rx_packets; + storage->tx_packets = (__u64)netdev->stats.tx_packets; + storage->rx_bytes = (__u64)netdev->stats.rx_bytes; + storage->tx_bytes = (__u64)netdev->stats.tx_bytes; + + /* use counter */ + storage->rx_length_errors = stats.rx_undersize_frame_errors + + stats.rx_oversize_frame_errors; + storage->rx_crc_errors = stats.rx_fcs_errors; + storage->rx_frame_errors = stats.rx_alignment_errors; + storage->rx_fifo_errors = stats.rx_dropped_frames; + storage->rx_over_errors = stats.rx_oversize_frame_errors; + storage->rx_errors = stats.rx_fcs_errors + + stats.rx_alignment_errors + + stats.rx_fragment_errors + + stats.rx_jabber_errors + + stats.rx_undersize_frame_errors + + stats.rx_oversize_frame_errors + + stats.rx_dropped_frames; + + storage->tx_carrier_errors = stats.tx_carrier_errors; + storage->tx_errors = stats.tx_fcs_errors + + stats.tx_excess_deferral_errors + + stats.tx_carrier_errors; + + storage->multicast = stats.rx_multicast_frames; + + return storage; +} + static const struct net_device_ops lan78xx_netdev_ops = { .ndo_open = lan78xx_open, .ndo_stop = lan78xx_stop, @@ -3274,6 +3322,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, + .ndo_get_stats64 = lan78xx_get_stats64, }; static void lan78xx_stat_monitor(unsigned long param) -- cgit v1.2.3 From e2ad1f976b721df383ff12c12a6dcc805cbb80f3 Mon Sep 17 00:00:00 2001 From: Andreas Färber Date: Thu, 17 Mar 2016 00:23:37 +0100 Subject: phy: mdio-thunder: Fix some Kconfig typos MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Drop two extra occurrences of "on" in option title and help text. Fixes: 379d7ac7ca31 ("phy: mdio-thunder: Add driver for Cavium Thunder SoC MDIO buses.") Cc: David Daney Signed-off-by: Andreas Färber Acked-by: David Daney Signed-off-by: David S. Miller --- drivers/net/phy/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 075a4cc175b1..6dad9a9c356c 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -197,13 +197,13 @@ config MDIO_OCTEON drivers on some systems. config MDIO_THUNDER - tristate "Support for MDIO buses on on ThunderX SOCs" + tristate "Support for MDIO buses on ThunderX SOCs" depends on 64BIT depends on PCI select MDIO_CAVIUM help This driver supports the MDIO interfaces found on Cavium - ThunderX SoCs when the MDIO bus device appears on as a PCI + ThunderX SoCs when the MDIO bus device appears as a PCI device. -- cgit v1.2.3 From 76e39ccf9c36352a8fc7c084ec25b735010685fd Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Thu, 17 Mar 2016 18:49:42 +0200 Subject: net/mlx4_core: Fix backward compatibility on VFs Commit 85743f1eb345 ("net/mlx4_core: Set UAR page size to 4KB regardless of system page size") introduced dependency where old VF drivers without this fix fail to load if the PF driver runs with this commit. To resolve this add a module parameter which disables that functionality by default. If both the PF and VFs are running with a driver with that commit the administrator may set the module param to true. 
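For readers unfamiliar with the pattern, the fix amounts to a read-only bool module parameter that defaults to the legacy, VF-compatible behaviour and only enables the 4K UAR path when the administrator opts in. The following is a hedged, self-contained sketch of that opt-in parameter pattern as a hypothetical demo module; the real parameter and the uar_page_shift selection live in mlx4's main.c as shown in the diff further down, and the demo_* names here are purely illustrative.

/* Hypothetical minimal module illustrating the opt-in parameter pattern
 * used by the mlx4 fix below; not part of the patch itself.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool enable_4k_uar;		/* false by default: keep old, VF-compatible behaviour */
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar, "Enable 4K UAR (default: false)");

static int __init demo_init(void)
{
	/* A real driver would branch here, e.g. pick the UAR page shift. */
	pr_info("4K UAR %s by module parameter\n",
		enable_4k_uar ? "enabled" : "disabled");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the parameter is 0444 it is visible under /sys/module but can only be set at load time (e.g. modprobe mlx4_core enable_4k_uar=1), which keeps the default safe for mixed PF/VF driver versions.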
The module parameter is called enable_4k_uar. Fixes: 85743f1eb345 ('net/mlx4_core: Set UAR page size to 4KB ...') Signed-off-by: Eli Cohen Tested-by: Alexey Kardashevskiy Signed-off-by: David S. Miller --- drivers/net/ethernet/mellanox/mlx4/main.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 503ec23e84cc..358f7230da58 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -105,6 +105,11 @@ module_param(enable_64b_cqe_eqe, bool, 0444); MODULE_PARM_DESC(enable_64b_cqe_eqe, "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)"); +static bool enable_4k_uar; +module_param(enable_4k_uar, bool, 0444); +MODULE_PARM_DESC(enable_4k_uar, + "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)"); + #define PF_CONTEXT_BEHAVIOUR_MASK (MLX4_FUNC_CAP_64B_EQE_CQE | \ MLX4_FUNC_CAP_EQE_CQE_STRIDE | \ MLX4_FUNC_CAP_DMFS_A0_STATIC) @@ -423,7 +428,11 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) /* Virtual PCI function needs to determine UAR page size from * firmware. Only master PCI function can set the uar page size */ - dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; + if (enable_4k_uar) + dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT; + else + dev->uar_page_shift = PAGE_SHIFT; + mlx4_set_num_reserved_uars(dev, dev_cap); } @@ -2233,11 +2242,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev) dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; - /* Always set UAR page size 4KB, set log_uar_sz accordingly */ - init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + - PAGE_SHIFT - - DEFAULT_UAR_PAGE_SHIFT; - init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + if (enable_4k_uar) { + init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + + PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT; + init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; + } else { + init_hca.log_uar_sz = ilog2(dev->caps.num_uars); + init_hca.uar_page_sz = PAGE_SHIFT - 12; + } init_hca.mw_enabled = 0; if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || -- cgit v1.2.3 From eee577232203842b4dcadb7ab477a298479633ed Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 17 Mar 2016 11:57:06 -0700 Subject: net: bcmgenet: fix dma api length mismatch When un-mapping skb->data in __bcmgenet_tx_reclaim(), we must use the length that was used in original dma_map_single(), instead of skb->len that might be bigger (includes the frags) We simply can store skb_len into tx_cb_ptr->dma_len and use it at unmap time. Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file") Signed-off-by: Eric Dumazet Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index d7e01a74e927..6746fd03cb3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1197,7 +1197,7 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, dma_unmap_addr(tx_cb_ptr, dma_addr), - tx_cb_ptr->skb->len, + dma_unmap_len(tx_cb_ptr, dma_len), DMA_TO_DEVICE); bcmgenet_free_cb(tx_cb_ptr); } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { @@ -1308,7 +1308,7 @@ static int bcmgenet_xmit_single(struct net_device *dev, } dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); - dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len); + dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len); length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | DMA_TX_APPEND_CRC; -- cgit v1.2.3 From fe30937b65354c7fec244caebbdaae68e28ca797 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 17 Mar 2016 17:23:36 -0700 Subject: bonding: fix bond_get_stats() bond_get_stats() can be called from rtnetlink (with RTNL held) or from /proc/net/dev seq handler (with RCU held) The logic added in commit 5f0c5f73e5ef ("bonding: make global bonding stats more reliable") kind of assumed only one cpu could run there. If multiple threads are reading /proc/net/dev, stats can be really messed up after a while. A second problem is that some fields are 32bit, so we need to properly handle the wrap around problem. Given that RTNL is not always held, we need to use bond_for_each_slave_rcu(). Fixes: 5f0c5f73e5ef ("bonding: make global bonding stats more reliable") Signed-off-by: Eric Dumazet Cc: Andy Gospodarek Cc: Jay Vosburgh Cc: Veaceslav Falico Reviewed-by: Nikolay Aleksandrov Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 66 ++++++++++++++++++++++------------------- include/net/bonding.h | 1 + 2 files changed, 36 insertions(+), 31 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 270b39c8357f..941ec99cd3b6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3301,6 +3301,30 @@ static int bond_close(struct net_device *bond_dev) return 0; } +/* fold stats, assuming all rtnl_link_stats64 fields are u64, but + * that some drivers can provide 32bit values only. 
+ */ +static void bond_fold_stats(struct rtnl_link_stats64 *_res, + const struct rtnl_link_stats64 *_new, + const struct rtnl_link_stats64 *_old) +{ + const u64 *new = (const u64 *)_new; + const u64 *old = (const u64 *)_old; + u64 *res = (u64 *)_res; + int i; + + for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { + u64 nv = new[i]; + u64 ov = old[i]; + + /* detects if this particular field is 32bit only */ + if (((nv | ov) >> 32) == 0) + res[i] += (u32)nv - (u32)ov; + else + res[i] += nv - ov; + } +} + static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, struct rtnl_link_stats64 *stats) { @@ -3309,44 +3333,23 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, struct list_head *iter; struct slave *slave; + spin_lock(&bond->stats_lock); memcpy(stats, &bond->bond_stats, sizeof(*stats)); - bond_for_each_slave(bond, slave, iter) { - const struct rtnl_link_stats64 *sstats = + rcu_read_lock(); + bond_for_each_slave_rcu(bond, slave, iter) { + const struct rtnl_link_stats64 *new = dev_get_stats(slave->dev, &temp); - struct rtnl_link_stats64 *pstats = &slave->slave_stats; - - stats->rx_packets += sstats->rx_packets - pstats->rx_packets; - stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes; - stats->rx_errors += sstats->rx_errors - pstats->rx_errors; - stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped; - stats->rx_nohandler += sstats->rx_nohandler - pstats->rx_nohandler; - - stats->tx_packets += sstats->tx_packets - pstats->tx_packets;; - stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes; - stats->tx_errors += sstats->tx_errors - pstats->tx_errors; - stats->tx_dropped += sstats->tx_dropped - pstats->tx_dropped; - - stats->multicast += sstats->multicast - pstats->multicast; - stats->collisions += sstats->collisions - pstats->collisions; - - stats->rx_length_errors += sstats->rx_length_errors - pstats->rx_length_errors; - stats->rx_over_errors += sstats->rx_over_errors - pstats->rx_over_errors; - stats->rx_crc_errors += sstats->rx_crc_errors - pstats->rx_crc_errors; - stats->rx_frame_errors += sstats->rx_frame_errors - pstats->rx_frame_errors; - stats->rx_fifo_errors += sstats->rx_fifo_errors - pstats->rx_fifo_errors; - stats->rx_missed_errors += sstats->rx_missed_errors - pstats->rx_missed_errors; - - stats->tx_aborted_errors += sstats->tx_aborted_errors - pstats->tx_aborted_errors; - stats->tx_carrier_errors += sstats->tx_carrier_errors - pstats->tx_carrier_errors; - stats->tx_fifo_errors += sstats->tx_fifo_errors - pstats->tx_fifo_errors; - stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors - pstats->tx_heartbeat_errors; - stats->tx_window_errors += sstats->tx_window_errors - pstats->tx_window_errors; + + bond_fold_stats(stats, new, &slave->slave_stats); /* save off the slave stats for the next run */ - memcpy(pstats, sstats, sizeof(*sstats)); + memcpy(&slave->slave_stats, new, sizeof(*new)); } + rcu_read_unlock(); + memcpy(&bond->bond_stats, stats, sizeof(*stats)); + spin_unlock(&bond->stats_lock); return stats; } @@ -4160,6 +4163,7 @@ void bond_setup(struct net_device *bond_dev) struct bonding *bond = netdev_priv(bond_dev); spin_lock_init(&bond->mode_lock); + spin_lock_init(&bond->stats_lock); bond->params = bonding_defaults; /* Initialize pointers */ diff --git a/include/net/bonding.h b/include/net/bonding.h index ee6c52053aa3..791800ddd6d9 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -215,6 +215,7 @@ struct bonding { * ALB mode (6) - to sync the use and modifications of its hash table */ 
spinlock_t mode_lock; + spinlock_t stats_lock; u8 send_peer_notif; u8 igmp_retrans; #ifdef CONFIG_PROC_FS -- cgit v1.2.3
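For reference, the wrap-aware delta folding that bond_fold_stats() applies to each rtnl_link_stats64 field can be exercised in isolation. The sketch below is a standalone userspace C program (hypothetical sample values, not kernel code) that reproduces the per-field logic from the patch above: if both the new and old samples fit in 32 bits, the counter is assumed to be 32-bit and the delta is taken modulo 2^32 so a rollover still yields the correct increment; otherwise a plain 64-bit subtraction is used.

#include <stdint.h>
#include <stdio.h>

/* Fold one counter delta into an accumulator, mirroring what
 * bond_fold_stats() does per field.
 */
static void fold_one(uint64_t *res, uint64_t nv, uint64_t ov)
{
	if (((nv | ov) >> 32) == 0)
		*res += (uint32_t)nv - (uint32_t)ov;	/* 32-bit counter, wrap-safe */
	else
		*res += nv - ov;			/* genuine 64-bit counter */
}

int main(void)
{
	uint64_t total = 0;

	/* Hypothetical samples: a 32-bit device counter wraps from
	 * 0xFFFFFFF0 to 5 between two reads; the true delta is 21.
	 */
	fold_one(&total, 5, 0xFFFFFFF0u);
	printf("delta across wrap = %llu\n", (unsigned long long)total);
	return 0;
}

Running it prints "delta across wrap = 21", illustrating why the unsigned 32-bit subtraction is the right choice for slave drivers that only provide 32-bit counters, while 64-bit counters still get a straight subtraction.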