| author | Jani Nikula <jani.nikula@intel.com> | 2025-06-09 12:40:46 +0300 | 
|---|---|---|
| committer | Jani Nikula <jani.nikula@intel.com> | 2025-06-09 12:40:46 +0300 | 
| commit | 34c55367af96f62e89221444f04487440ebc6487 (patch) | |
| tree | fdb36ba67d7dea09455b55037e26043b7e051ef9 /drivers/net/ethernet/intel/ice/ice_main.c | |
| parent | 7247efca0dcbc8ac6147db9200ed1549c0662465 (diff) | |
| parent | 19272b37aa4f83ca52bdf9c16d5d81bdd1354494 (diff) | |
| download | linux-34c55367af96f62e89221444f04487440ebc6487.tar.xz | |
Merge drm/drm-next into drm-intel-next
Sync to v6.16-rc1, among other things to get the fixed-size GENMASK_U*()
and BIT_U*() macros.
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
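
For context, GENMASK_U8/16/32/64() and BIT_U8/16/32/64() are typed, fixed-width variants of GENMASK() and BIT(): they evaluate to the named unsigned type rather than to unsigned long, and reject out-of-range bit numbers at build time. A minimal sketch of the intended usage, assuming the v6.16-rc1 <linux/bits.h> definitions; the XYZ_CTL_* register layout and xyz_ctl_encode() below are hypothetical, invented purely for illustration:

#include <linux/bits.h>	/* GENMASK_U*() and BIT_U*(), new in v6.16-rc1 */
#include <linux/types.h>

/* Hypothetical 16-bit control-register layout, for illustration only. */
#define XYZ_CTL_MODE_MASK	GENMASK_U16(3, 0)	/* typed u16, bits 3:0 */
#define XYZ_CTL_ENABLE		BIT_U16(15)		/* typed u16, bit 15 */

static inline u16 xyz_ctl_encode(u16 mode)
{
	/* GENMASK()/BIT() evaluate to unsigned long; the _U16 variants
	 * already have the register's width, so no silent truncation
	 * occurs when the result is stored in a u16, and an out-of-range
	 * bit number (e.g. BIT_U16(16)) fails to compile.
	 */
	return (mode & XYZ_CTL_MODE_MASK) | XYZ_CTL_ENABLE;
}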
Diffstat (limited to 'drivers/net/ethernet/intel/ice/ice_main.c')
| -rw-r--r-- | drivers/net/ethernet/intel/ice/ice_main.c | 130 | 
1 file changed, 99 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index d390157b59fe..0a11b4281092 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1743,7 +1743,7 @@ static void ice_service_task_restart(struct ice_pf *pf)
  */
 static void ice_service_timer(struct timer_list *t)
 {
-	struct ice_pf *pf = from_timer(pf, t, serv_tmr);
+	struct ice_pf *pf = timer_container_of(pf, t, serv_tmr);

 	mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
 	ice_service_task_schedule(pf);
@@ -2401,11 +2401,11 @@ static void ice_service_task(struct work_struct *work)
 	}

 	if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {
-		struct iidc_event *event;
+		struct iidc_rdma_event *event;

 		event = kzalloc(sizeof(*event), GFP_KERNEL);
 		if (event) {
-			set_bit(IIDC_EVENT_CRIT_ERR, event->type);
+			set_bit(IIDC_RDMA_EVENT_CRIT_ERR, event->type);
 			/* report the entire OICR value to AUX driver */
 			swap(event->reg, pf->oicr_err_reg);
 			ice_send_event_to_aux(pf, event);
@@ -2424,11 +2424,11 @@ static void ice_service_task(struct work_struct *work)
 		ice_plug_aux_dev(pf);

 	if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {
-		struct iidc_event *event;
+		struct iidc_rdma_event *event;

 		event = kzalloc(sizeof(*event), GFP_KERNEL);
 		if (event) {
-			set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
+			set_bit(IIDC_RDMA_EVENT_AFTER_MTU_CHANGE, event->type);
 			ice_send_event_to_aux(pf, event);
 			kfree(event);
 		}
@@ -2741,6 +2741,27 @@ void ice_map_xdp_rings(struct ice_vsi *vsi)
 }

 /**
+ * ice_unmap_xdp_rings - Unmap XDP rings from interrupt vectors
+ * @vsi: the VSI with XDP rings being unmapped
+ */
+static void ice_unmap_xdp_rings(struct ice_vsi *vsi)
+{
+	int v_idx;
+
+	ice_for_each_q_vector(vsi, v_idx) {
+		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+		struct ice_tx_ring *ring;
+
+		ice_for_each_tx_ring(ring, q_vector->tx)
+			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
+				break;
+
+		/* restore the value of last node prior to XDP setup */
+		q_vector->tx.tx_ring = ring;
+	}
+}
+
+/**
  * ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
  * @vsi: VSI to bring up Tx rings used by XDP
  * @prog: bpf program that will be assigned to VSI
@@ -2803,7 +2824,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 	if (status) {
 		dev_err(dev, "Failed VSI LAN queue config for XDP, error: %d\n",
 			status);
-		goto clear_xdp_rings;
+		goto unmap_xdp_rings;
 	}

 	/* assign the prog only when it's not already present on VSI;
@@ -2819,6 +2840,8 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
 		ice_vsi_assign_bpf_prog(vsi, prog);

 	return 0;
+unmap_xdp_rings:
+	ice_unmap_xdp_rings(vsi);
 clear_xdp_rings:
 	ice_for_each_xdp_txq(vsi, i)
 		if (vsi->xdp_rings[i]) {
@@ -2835,6 +2858,8 @@ err_map_xdp:
 	mutex_unlock(&pf->avail_q_mutex);

 	devm_kfree(dev, vsi->xdp_rings);
+	vsi->xdp_rings = NULL;
+
 	return -ENOMEM;
 }

@@ -2850,7 +2875,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 	struct ice_pf *pf = vsi->back;
-	int i, v_idx;
+	int i;

 	/* q_vectors are freed in reset path so there's no point in detaching
 	 * rings
@@ -2858,17 +2883,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
 	if (cfg_type == ICE_XDP_CFG_PART)
 		goto free_qmap;

-	ice_for_each_q_vector(vsi, v_idx) {
-		struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-		struct ice_tx_ring *ring;
-
-		ice_for_each_tx_ring(ring, q_vector->tx)
-			if (!ring->tx_buf || !ice_ring_is_xdp(ring))
-				break;
-
-		/* restore the value of last node prior to XDP setup */
-		q_vector->tx.tx_ring = ring;
-	}
+	ice_unmap_xdp_rings(vsi);

 free_qmap:
 	mutex_lock(&pf->avail_q_mutex);
@@ -3013,11 +3028,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 		xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
 		if (xdp_ring_err) {
 			NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+			goto resume_if;
 		} else {
 			xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
 							     ICE_XDP_CFG_FULL);
-			if (xdp_ring_err)
+			if (xdp_ring_err) {
 				NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+				goto resume_if;
+			}
 		}
 		xdp_features_set_redirect_target(vsi->netdev, true);
 		/* reallocate Rx queues that are used for zero-copy */
@@ -3035,6 +3053,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
 			NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
 	}

+resume_if:
 	if (if_running)
 		ret = ice_up(vsi);

@@ -8330,11 +8349,16 @@ void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
  * @np: net device to configure
  * @filter_dev: device on which filter is added
  * @cls_flower: offload data
+ * @ingress: if the rule is added to an ingress block
+ *
+ * Return: 0 if the flower was successfully added or deleted,
+ *	   negative error code otherwise.
  */
 static int
 ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
 			struct net_device *filter_dev,
-			struct flow_cls_offload *cls_flower)
+			struct flow_cls_offload *cls_flower,
+			bool ingress)
 {
 	struct ice_vsi *vsi = np->vsi;

@@ -8343,7 +8367,7 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,

 	switch (cls_flower->command) {
 	case FLOW_CLS_REPLACE:
-		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+		return ice_add_cls_flower(filter_dev, vsi, cls_flower, ingress);
 	case FLOW_CLS_DESTROY:
 		return ice_del_cls_flower(vsi, cls_flower);
 	default:
@@ -8352,20 +8376,46 @@ ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
 }

 /**
- * ice_setup_tc_block_cb - callback handler registered for TC block
+ * ice_setup_tc_block_cb_ingress - callback handler for ingress TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
+ */
+static int
+ice_setup_tc_block_cb_ingress(enum tc_setup_type type, void *type_data,
+			      void *cb_priv)
+{
+	struct ice_netdev_priv *np = cb_priv;
+
+	switch (type) {
+	case TC_SETUP_CLSFLOWER:
+		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+					       type_data, true);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+/**
+ * ice_setup_tc_block_cb_egress - callback handler for egress TC block
  * @type: TC SETUP type
  * @type_data: TC flower offload data that contains user input
  * @cb_priv: netdev private data
+ *
+ * Return: 0 if the setup was successful, negative error code otherwise.
  */
 static int
-ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+ice_setup_tc_block_cb_egress(enum tc_setup_type type, void *type_data,
+			     void *cb_priv)
 {
 	struct ice_netdev_priv *np = cb_priv;

 	switch (type) {
 	case TC_SETUP_CLSFLOWER:
 		return ice_setup_tc_cls_flower(np, np->vsi->netdev,
-					       type_data);
+					       type_data, false);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -9310,27 +9360,45 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
 	     void *type_data)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
+	enum flow_block_binder_type binder_type;
+	struct iidc_rdma_core_dev_info *cdev;
 	struct ice_pf *pf = np->vsi->back;
+	flow_setup_cb_t *flower_handler;
 	bool locked = false;
 	int err;

 	switch (type) {
 	case TC_SETUP_BLOCK:
+		binder_type =
+			((struct flow_block_offload *)type_data)->binder_type;
+
+		switch (binder_type) {
+		case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
+			flower_handler = ice_setup_tc_block_cb_ingress;
+			break;
+		case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
+			flower_handler = ice_setup_tc_block_cb_egress;
+			break;
+		default:
+			return -EOPNOTSUPP;
+		}
+
 		return flow_block_cb_setup_simple(type_data,
 						  &ice_block_cb_list,
-						  ice_setup_tc_block_cb,
-						  np, np, true);
+						  flower_handler,
+						  np, np, false);
 	case TC_SETUP_QDISC_MQPRIO:
 		if (ice_is_eswitch_mode_switchdev(pf)) {
 			netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
 			return -EOPNOTSUPP;
 		}

-		if (pf->adev) {
+		cdev = pf->cdev_info;
+		if (cdev && cdev->adev) {
 			mutex_lock(&pf->adev_mutex);
-			device_lock(&pf->adev->dev);
+			device_lock(&cdev->adev->dev);
 			locked = true;
-			if (pf->adev->dev.driver) {
+			if (cdev->adev->dev.driver) {
 				netdev_err(netdev, "Cannot change qdisc when RDMA is active\n");
 				err = -EBUSY;
 				goto adev_unlock;
@@ -9344,7 +9412,7 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,

 adev_unlock:
 		if (locked) {
-			device_unlock(&pf->adev->dev);
+			device_unlock(&cdev->adev->dev);
 			mutex_unlock(&pf->adev_mutex);
 		}
 		return err;
@@ -9380,7 +9448,7 @@ ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
 	case TC_SETUP_CLSFLOWER:
 		return ice_setup_tc_cls_flower(np, priv->netdev,
 					       (struct flow_cls_offload *)
-					       type_data);
+					       type_data, false);
 	default:
 		return -EOPNOTSUPP;
 	}
