Diffstat (limited to 'drivers/net/ethernet/microsoft')
-rw-r--r--  drivers/net/ethernet/microsoft/Kconfig              |   1
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c     | 655
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c    |  39
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_bpf.c      |   2
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c       | 606
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c  |  82
6 files changed, 1169 insertions(+), 216 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 901fbffbf718..3f36ee6a8ece 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -22,6 +22,7 @@ config MICROSOFT_MANA
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
+ select NET_SHAPER
help
This driver supports Microsoft Azure Network Adapter (MANA).
So far, the driver is only supported on X86_64.
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index be95336ce089..43f034e180c4 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -6,8 +6,12 @@
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <linux/msi.h>
+#include <linux/irqdomain.h>
+#include <linux/export.h>
#include <net/mana/mana.h>
+#include <net/mana/hw_channel.h>
struct dentry *mana_debugfs_root;
@@ -31,6 +35,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev)
gc->db_page_base = gc->bar0_va +
mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+ gc->phys_db_page_base = gc->bar0_pa +
+ mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF);
+
sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF);
sriov_base_va = gc->bar0_va + sriov_base_off;
@@ -63,6 +70,24 @@ static void mana_gd_init_registers(struct pci_dev *pdev)
mana_gd_init_vf_regs(pdev);
}
+/* Suppress logging when we set timeout to zero */
+bool mana_need_log(struct gdma_context *gc, int err)
+{
+ struct hw_channel_context *hwc;
+
+ if (err != -ETIMEDOUT)
+ return true;
+
+ if (!gc)
+ return true;
+
+ hwc = gc->hwc.driver_data;
+ if (hwc && hwc->hwc_timeout == 0)
+ return false;
+
+ return true;
+}
+
static int mana_gd_query_max_resources(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -80,8 +105,15 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- if (gc->num_msix_usable > resp.max_msix)
- gc->num_msix_usable = resp.max_msix;
+ if (!pci_msix_can_alloc_dyn(pdev)) {
+ if (gc->num_msix_usable > resp.max_msix)
+ gc->num_msix_usable = resp.max_msix;
+ } else {
+		/* If dynamic allocation is enabled, the HWC MSI has already
+		 * been allocated
+ */
+ gc->num_msix_usable = min(resp.max_msix, num_online_cpus() + 1);
+ }
if (gc->num_msix_usable <= 1)
return -ENOSPC;
@@ -134,9 +166,10 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
struct gdma_list_devices_resp resp = {};
struct gdma_general_req req = {};
struct gdma_dev_id dev;
- u32 i, max_num_devs;
+ int found_dev = 0;
u16 dev_type;
int err;
+ u32 i;
mana_gd_init_req_hdr(&req.hdr, GDMA_LIST_DEVICES, sizeof(req),
sizeof(resp));
@@ -148,12 +181,17 @@ static int mana_gd_detect_devices(struct pci_dev *pdev)
return err ? err : -EPROTO;
}
- max_num_devs = min_t(u32, MAX_NUM_GDMA_DEVICES, resp.num_of_devs);
-
- for (i = 0; i < max_num_devs; i++) {
+ for (i = 0; i < GDMA_DEV_LIST_SIZE &&
+ found_dev < resp.num_of_devs; i++) {
dev = resp.devs[i];
dev_type = dev.type;
+ /* Skip empty devices */
+ if (dev.as_uint32 == 0)
+ continue;
+
+ found_dev++;
+
/* HWC is already detected in mana_hwc_create_channel(). */
if (dev_type == GDMA_DEVICE_HWC)
continue;
@@ -260,8 +298,9 @@ static int mana_gd_disable_queue(struct gdma_queue *queue)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
- resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to disable queue: %d, 0x%x\n", err,
+ resp.hdr.status);
return err ? err : -EPROTO;
}
@@ -331,6 +370,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
}
+EXPORT_SYMBOL_NS(mana_gd_wq_ring_doorbell, "NET_MANA");
void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
{
@@ -343,12 +383,115 @@ void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit)
mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id,
head, arm_bit);
}
+EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
+
+#define MANA_SERVICE_PERIOD 10
+
+static void mana_serv_fpga(struct pci_dev *pdev)
+{
+ struct pci_bus *bus, *parent;
+
+ pci_lock_rescan_remove();
+
+ bus = pdev->bus;
+ if (!bus) {
+ dev_err(&pdev->dev, "MANA service: no bus\n");
+ goto out;
+ }
+
+ parent = bus->parent;
+ if (!parent) {
+ dev_err(&pdev->dev, "MANA service: no parent bus\n");
+ goto out;
+ }
+
+ pci_stop_and_remove_bus_device(bus->self);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ pci_rescan_bus(parent);
+
+out:
+ pci_unlock_rescan_remove();
+}
+
+static void mana_serv_reset(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct hw_channel_context *hwc;
+
+ if (!gc) {
+ dev_err(&pdev->dev, "MANA service: no GC\n");
+ return;
+ }
+
+ hwc = gc->hwc.driver_data;
+ if (!hwc) {
+ dev_err(&pdev->dev, "MANA service: no HWC\n");
+ goto out;
+ }
+
+ /* HWC is not responding in this case, so don't wait */
+ hwc->hwc_timeout = 0;
+
+ dev_info(&pdev->dev, "MANA reset cycle start\n");
+
+ mana_gd_suspend(pdev, PMSG_SUSPEND);
+
+ msleep(MANA_SERVICE_PERIOD * 1000);
+
+ mana_gd_resume(pdev);
+
+ dev_info(&pdev->dev, "MANA reset cycle completed\n");
+
+out:
+ gc->in_service = false;
+}
+
+struct mana_serv_work {
+ struct work_struct serv_work;
+ struct pci_dev *pdev;
+ enum gdma_eqe_type type;
+};
+
+static void mana_serv_func(struct work_struct *w)
+{
+ struct mana_serv_work *mns_wk;
+ struct pci_dev *pdev;
+
+ mns_wk = container_of(w, struct mana_serv_work, serv_work);
+ pdev = mns_wk->pdev;
+
+ if (!pdev)
+ goto out;
+
+ switch (mns_wk->type) {
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ mana_serv_fpga(pdev);
+ break;
+
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ mana_serv_reset(pdev);
+ break;
+
+ default:
+ dev_err(&pdev->dev, "MANA service: unknown type %d\n",
+ mns_wk->type);
+ break;
+ }
+
+out:
+ pci_dev_put(pdev);
+ kfree(mns_wk);
+ module_put(THIS_MODULE);
+}
static void mana_gd_process_eqe(struct gdma_queue *eq)
{
u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);
struct gdma_context *gc = eq->gdma_dev->gdma_context;
struct gdma_eqe *eq_eqe_ptr = eq->queue_mem_ptr;
+ struct mana_serv_work *mns_wk;
union gdma_eqe_info eqe_info;
enum gdma_eqe_type type;
struct gdma_event event;
@@ -383,6 +526,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_HWC_SOC_SERVICE:
case GDMA_EQE_RNIC_QP_FATAL:
if (!eq->eq.callback)
break;
@@ -392,6 +536,35 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
eq->eq.callback(eq->eq.context, eq, &event);
break;
+ case GDMA_EQE_HWC_FPGA_RECONFIG:
+ case GDMA_EQE_HWC_RESET_REQUEST:
+ dev_info(gc->dev, "Recv MANA service type:%d\n", type);
+
+ if (gc->in_service) {
+ dev_info(gc->dev, "Already in service\n");
+ break;
+ }
+
+ if (!try_module_get(THIS_MODULE)) {
+ dev_info(gc->dev, "Module is unloading\n");
+ break;
+ }
+
+ mns_wk = kzalloc(sizeof(*mns_wk), GFP_ATOMIC);
+ if (!mns_wk) {
+ module_put(THIS_MODULE);
+ break;
+ }
+
+ dev_info(gc->dev, "Start MANA service type:%d\n", type);
+ gc->in_service = true;
+ mns_wk->pdev = to_pci_dev(gc->dev);
+ mns_wk->type = type;
+ pci_dev_get(mns_wk->pdev);
+ INIT_WORK(&mns_wk->serv_work, mana_serv_func);
+ schedule_work(&mns_wk->serv_work);
+ break;
+
default:
break;
}
@@ -474,7 +647,9 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
}
queue->eq.msix_index = msi_index;
- gic = &gc->irq_contexts[msi_index];
+ gic = xa_load(&gc->irq_contexts, msi_index);
+ if (WARN_ON(!gic))
+ return -EINVAL;
spin_lock_irqsave(&gic->lock, flags);
list_add_rcu(&queue->entry, &gic->eq_list);
@@ -483,7 +658,7 @@ static int mana_gd_register_irq(struct gdma_queue *queue,
return 0;
}
-static void mana_gd_deregiser_irq(struct gdma_queue *queue)
+static void mana_gd_deregister_irq(struct gdma_queue *queue)
{
struct gdma_dev *gd = queue->gdma_dev;
struct gdma_irq_context *gic;
@@ -499,7 +674,10 @@ static void mana_gd_deregiser_irq(struct gdma_queue *queue)
if (WARN_ON(msix_index >= gc->num_msix_usable))
return;
- gic = &gc->irq_contexts[msix_index];
+ gic = xa_load(&gc->irq_contexts, msix_index);
+ if (WARN_ON(!gic))
+ return;
+
spin_lock_irqsave(&gic->lock, flags);
list_for_each_entry_rcu(eq, &gic->eq_list, entry) {
if (queue == eq) {
@@ -533,7 +711,8 @@ int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err) {
- dev_err(dev, "test_eq failed: %d\n", err);
+ if (mana_need_log(gc, err))
+ dev_err(dev, "test_eq failed: %d\n", err);
goto out;
}
@@ -568,11 +747,11 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets,
if (flush_evenets) {
err = mana_gd_test_eq(gc, queue);
- if (err)
+ if (err && mana_need_log(gc, err))
dev_warn(gc->dev, "Failed to flush EQ: %d\n", err);
}
- mana_gd_deregiser_irq(queue);
+ mana_gd_deregister_irq(queue);
if (queue->eq.disable_needed)
mana_gd_disable_queue(queue);
@@ -666,8 +845,11 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
queue->head = 0;
queue->tail = 0;
@@ -688,6 +870,8 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -709,8 +893,9 @@ int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
+ err, resp.hdr.status);
return -EPROTO;
}
@@ -770,7 +955,13 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
}
gmi->dma_region_handle = resp.dma_region_handle;
+ dev_dbg(gc->dev, "Created DMA region handle 0x%llx\n",
+ gmi->dma_region_handle);
out:
+ if (err)
+ dev_dbg(gc->dev,
+ "Failed to create DMA region of length: %u, page_type: %d, status: 0x%x, err: %d\n",
+ length, req->gdma_page_type, resp.hdr.status, err);
kfree(req);
return err;
}
@@ -793,8 +984,11 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, gdma memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -815,6 +1009,8 @@ int mana_gd_create_mana_eq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
@@ -841,8 +1037,11 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "GDMA queue type: %d, size: %u, memory allocation err: %d\n",
+ spec->type, spec->queue_size, err);
goto free_q;
+ }
err = mana_gd_create_dma_region(gd, gmi);
if (err)
@@ -862,11 +1061,14 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
*queue_ptr = queue;
return 0;
out:
+ dev_err(gc->dev, "Failed to create queue type %d of size: %u, err: %d\n",
+ spec->type, spec->queue_size, err);
mana_gd_free_memory(gmi);
free_q:
kfree(queue);
return err;
}
+EXPORT_SYMBOL_NS(mana_gd_create_mana_wq_cq, "NET_MANA");
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
{
@@ -934,6 +1136,7 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
err, resp.hdr.status);
return err ? err : -EPROTO;
}
+ gc->pf_cap_flags1 = resp.pf_cap_flags1;
if (resp.pf_cap_flags1 & GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) {
err = mana_gd_query_hwc_timeout(pdev, &hwc->hwc_timeout);
if (err) {
@@ -974,7 +1177,6 @@ int mana_gd_register_device(struct gdma_dev *gd)
return 0;
}
-EXPORT_SYMBOL_NS(mana_gd_register_device, "NET_MANA");
int mana_gd_deregister_device(struct gdma_dev *gd)
{
@@ -993,8 +1195,9 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
- dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
- err, resp.hdr.status);
+ if (mana_need_log(gc, err))
+ dev_err(gc->dev, "Failed to deregister device: %d, 0x%x\n",
+ err, resp.hdr.status);
if (!err)
err = -EPROTO;
}
@@ -1005,7 +1208,6 @@ int mana_gd_deregister_device(struct gdma_dev *gd)
return err;
}
-EXPORT_SYMBOL_NS(mana_gd_deregister_device, "NET_MANA");
u32 mana_gd_wq_avail_space(struct gdma_queue *wq)
{
@@ -1041,7 +1243,7 @@ static u32 mana_gd_write_client_oob(const struct gdma_wqe_request *wqe_req,
header->inline_oob_size_div4 = client_oob_size / sizeof(u32);
if (oob_in_sgl) {
- WARN_ON_ONCE(!pad_data || wqe_req->num_sge < 2);
+ WARN_ON_ONCE(wqe_req->num_sge < 2);
header->client_oob_in_sgl = 1;
@@ -1148,6 +1350,7 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
return 0;
}
+EXPORT_SYMBOL_NS(mana_gd_post_work_request, "NET_MANA");
int mana_gd_post_and_ring(struct gdma_queue *queue,
const struct gdma_wqe_request *wqe_req,
@@ -1157,8 +1360,11 @@ int mana_gd_post_and_ring(struct gdma_queue *queue,
int err;
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to post work req from queue type %d of size %u (err=%d)\n",
+ queue->type, queue->queue_size, err);
return err;
+ }
mana_gd_wq_ring_doorbell(gc, queue);
@@ -1218,6 +1424,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
return cqe_idx;
}
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
static irqreturn_t mana_gd_intr(int irq, void *arg)
{
@@ -1253,7 +1460,49 @@ void mana_gd_free_res_map(struct gdma_resource *r)
r->size = 0;
}
-static int irq_setup(unsigned int *irqs, unsigned int len, int node)
+/*
+ * Spread on CPUs with the following heuristics:
+ *
+ * 1. No more than one IRQ per CPU, if possible;
+ * 2. NUMA locality is the second priority;
+ * 3. Sibling dislocality is the last priority.
+ *
+ * Let's consider this topology:
+ *
+ * Node 0 1
+ * Core 0 1 2 3
+ * CPU 0 1 2 3 4 5 6 7
+ *
+ * The most performant IRQ distribution based on the above topology
+ * and heuristics may look like this:
+ *
+ * IRQ Nodes Cores CPUs
+ * 0 1 0 0-1
+ * 1 1 1 2-3
+ * 2 1 0 0-1
+ * 3 1 1 2-3
+ * 4 2 2 4-5
+ * 5 2 3 6-7
+ * 6 2 2 4-5
+ * 7 2 3 6-7
+ *
+ * The heuristic is implemented as follows.
+ *
+ * The outer for_each() loop resets the 'weight' to the actual number
+ * of CPUs in the hop. Then inner for_each() loop decrements it by the
+ * number of sibling groups (cores) while assigning first set of IRQs
+ * to each group. IRQs 0 and 1 above are distributed this way.
+ *
+ * Now, because NUMA locality is more important, we walk the same set
+ * of siblings again and assign the 2nd set of IRQs (2 and 3); this is
+ * implemented by the middle while() loop. We keep doing this until the
+ * number of IRQs assigned on this hop equals the number of CPUs in the
+ * hop (weight == 0). Then we switch to the next hop and do the same
+ * thing.
+ */
+
+static int irq_setup(unsigned int *irqs, unsigned int len, int node,
+ bool skip_first_cpu)
{
const struct cpumask *next, *prev = cpu_none_mask;
cpumask_var_t cpus __free(free_cpumask_var);
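For illustration only (not part of the patch): a minimal user-space sketch of the spreading heuristic described in the comment above, with the example 2-node / 4-core / 8-CPU topology hard-coded. The cores[] table and the fixed two-hop order are assumptions of the sketch, not driver code.

#include <stdio.h>

int main(void)
{
	/* First CPU of each SMT sibling pair, per NUMA node. */
	int cores[2][2] = { { 0, 2 }, { 4, 6 } };
	int nirqs = 8, irq = 0;

	/* Outer loop: NUMA hops, closest node first (nodes numbered from 0). */
	for (int node = 0; node < 2 && irq < nirqs; node++) {
		int weight = 4;	/* CPUs in this hop */

		/* Sweep the hop's cores repeatedly: every sweep gives each
		 * core one more IRQ and drops weight by one per core, so a
		 * hop ends up with as many IRQs as it has CPUs.
		 */
		while (weight > 0 && irq < nirqs) {
			for (int c = 0; c < 2 && irq < nirqs; c++, weight--) {
				int cpu = cores[node][c];

				printf("IRQ %d -> node %d, CPUs %d-%d\n",
				       irq++, node, cpu, cpu + 1);
			}
		}
	}
	return 0;
}

The output reproduces the table in the comment: IRQs 0-3 land on the first node's two sibling pairs (each pair twice), and IRQs 4-7 on the second node's pairs.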
@@ -1268,11 +1517,18 @@ static int irq_setup(unsigned int *irqs, unsigned int len, int node)
while (weight > 0) {
cpumask_andnot(cpus, next, prev);
for_each_cpu(cpu, cpus) {
+ cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
+ --weight;
+
+ if (unlikely(skip_first_cpu)) {
+ skip_first_cpu = false;
+ continue;
+ }
+
if (len-- == 0)
goto done;
+
irq_set_affinity_and_hint(*irqs++, topology_sibling_cpumask(cpu));
- cpumask_andnot(cpus, cpus, topology_sibling_cpumask(cpu));
- --weight;
}
}
prev = next;
@@ -1282,47 +1538,108 @@ done:
return 0;
}
-static int mana_gd_setup_irqs(struct pci_dev *pdev)
+static int mana_gd_setup_dyn_irqs(struct pci_dev *pdev, int nvec)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- unsigned int max_queues_per_port;
struct gdma_irq_context *gic;
- unsigned int max_irqs, cpu;
- int start_irq_index = 1;
- int nvec, *irqs, irq;
- int err, i = 0, j;
+ bool skip_first_cpu = false;
+ int *irqs, irq, err, i;
- cpus_read_lock();
- max_queues_per_port = num_online_cpus();
- if (max_queues_per_port > MANA_MAX_NUM_QUEUES)
- max_queues_per_port = MANA_MAX_NUM_QUEUES;
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
- /* Need 1 interrupt for the Hardware communication Channel (HWC) */
- max_irqs = max_queues_per_port + 1;
+ /*
+ * While processing the next pci irq vector, we start with index 1,
+	 * as the IRQ vector at index 0 has already been set up for the HWC.
+	 * However, the irqs array is populated starting at index 0, as it is
+	 * later consumed by irq_setup().
+ */
+ for (i = 1; i <= nvec; i++) {
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+ gic->handler = mana_gd_process_eq_events;
+ INIT_LIST_HEAD(&gic->eq_list);
+ spin_lock_init(&gic->lock);
- nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX);
- if (nvec < 0) {
- cpus_read_unlock();
- return nvec;
+ snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
+ i - 1, pci_name(pdev));
+
+ /* one pci vector is already allocated for HWC */
+ irqs[i - 1] = pci_irq_vector(pdev, i);
+ if (irqs[i - 1] < 0) {
+ err = irqs[i - 1];
+ goto free_current_gic;
+ }
+
+ err = request_irq(irqs[i - 1], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- if (nvec <= num_online_cpus())
- start_irq_index = 0;
- irqs = kmalloc_array((nvec - start_irq_index), sizeof(int), GFP_KERNEL);
- if (!irqs) {
- err = -ENOMEM;
- goto free_irq_vector;
+ /*
+	 * When calling irq_setup() for dynamically added IRQs, if the number
+	 * of online CPUs is greater than or equal to the number of allocated
+	 * MSI-X vectors, skip the first CPU sibling group since it is already
+	 * affinitized to the HWC IRQ.
+ */
+ cpus_read_lock();
+ if (gc->num_msix_usable <= num_online_cpus())
+ skip_first_cpu = true;
+
+ err = irq_setup(irqs, nvec, gc->numa_node, skip_first_cpu);
+ if (err) {
+ cpus_read_unlock();
+ goto free_irq;
}
- gc->irq_contexts = kcalloc(nvec, sizeof(struct gdma_irq_context),
- GFP_KERNEL);
- if (!gc->irq_contexts) {
- err = -ENOMEM;
- goto free_irq_array;
+ cpus_read_unlock();
+ kfree(irqs);
+ return 0;
+
+free_current_gic:
+ kfree(gic);
+free_irq:
+ for (i -= 1; i > 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
+
+ irq_update_affinity_hint(irq, NULL);
+ free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
+ kfree(irqs);
+ return err;
+}
+
+static int mana_gd_setup_irqs(struct pci_dev *pdev, int nvec)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct gdma_irq_context *gic;
+ int *irqs, *start_irqs, irq;
+ unsigned int cpu;
+ int err, i;
+
+ irqs = kmalloc_array(nvec, sizeof(int), GFP_KERNEL);
+ if (!irqs)
+ return -ENOMEM;
+
+ start_irqs = irqs;
for (i = 0; i < nvec; i++) {
- gic = &gc->irq_contexts[i];
+ gic = kzalloc(sizeof(*gic), GFP_KERNEL);
+ if (!gic) {
+ err = -ENOMEM;
+ goto free_irq;
+ }
+
gic->handler = mana_gd_process_eq_events;
INIT_LIST_HEAD(&gic->eq_list);
spin_lock_init(&gic->lock);
@@ -1334,69 +1651,128 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev)
snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s",
i - 1, pci_name(pdev));
- irq = pci_irq_vector(pdev, i);
- if (irq < 0) {
- err = irq;
- goto free_irq;
+ irqs[i] = pci_irq_vector(pdev, i);
+ if (irqs[i] < 0) {
+ err = irqs[i];
+ goto free_current_gic;
}
- if (!i) {
- err = request_irq(irq, mana_gd_intr, 0, gic->name, gic);
- if (err)
- goto free_irq;
-
- /* If number of IRQ is one extra than number of online CPUs,
- * then we need to assign IRQ0 (hwc irq) and IRQ1 to
- * same CPU.
- * Else we will use different CPUs for IRQ0 and IRQ1.
- * Also we are using cpumask_local_spread instead of
- * cpumask_first for the node, because the node can be
- * mem only.
- */
- if (start_irq_index) {
- cpu = cpumask_local_spread(i, gc->numa_node);
- irq_set_affinity_and_hint(irq, cpumask_of(cpu));
- } else {
- irqs[start_irq_index] = irq;
- }
- } else {
- irqs[i - start_irq_index] = irq;
- err = request_irq(irqs[i - start_irq_index], mana_gd_intr, 0,
- gic->name, gic);
- if (err)
- goto free_irq;
- }
+ err = request_irq(irqs[i], mana_gd_intr, 0, gic->name, gic);
+ if (err)
+ goto free_current_gic;
+
+ xa_store(&gc->irq_contexts, i, gic, GFP_KERNEL);
}
- err = irq_setup(irqs, (nvec - start_irq_index), gc->numa_node);
- if (err)
+	/* If the number of IRQs is one more than the number of online CPUs,
+	 * then we need to assign IRQ0 (the HWC IRQ) and IRQ1 to the
+	 * same CPU.
+	 * Otherwise we will use different CPUs for IRQ0 and IRQ1.
+	 * Also, we use cpumask_local_spread() instead of cpumask_first()
+	 * for the node, because the node can be memory-only.
+ */
+ cpus_read_lock();
+ if (nvec > num_online_cpus()) {
+ cpu = cpumask_local_spread(0, gc->numa_node);
+ irq_set_affinity_and_hint(irqs[0], cpumask_of(cpu));
+ irqs++;
+ nvec -= 1;
+ }
+
+ err = irq_setup(irqs, nvec, gc->numa_node, false);
+ if (err) {
+ cpus_read_unlock();
goto free_irq;
+ }
- gc->max_num_msix = nvec;
- gc->num_msix_usable = nvec;
cpus_read_unlock();
- kfree(irqs);
+ kfree(start_irqs);
return 0;
+free_current_gic:
+ kfree(gic);
free_irq:
- for (j = i - 1; j >= 0; j--) {
- irq = pci_irq_vector(pdev, j);
- gic = &gc->irq_contexts[j];
+ for (i -= 1; i >= 0; i--) {
+ irq = pci_irq_vector(pdev, i);
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
-free_irq_array:
- kfree(irqs);
-free_irq_vector:
- cpus_read_unlock();
- pci_free_irq_vectors(pdev);
+ kfree(start_irqs);
return err;
}
+static int mana_gd_setup_hwc_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ unsigned int max_irqs, min_irqs;
+ int nvec, err;
+
+ if (pci_msix_can_alloc_dyn(pdev)) {
+ max_irqs = 1;
+ min_irqs = 1;
+ } else {
+ /* Need 1 interrupt for HWC */
+ max_irqs = min(num_online_cpus(), MANA_MAX_NUM_QUEUES) + 1;
+ min_irqs = 2;
+ }
+
+ nvec = pci_alloc_irq_vectors(pdev, min_irqs, max_irqs, PCI_IRQ_MSIX);
+ if (nvec < 0)
+ return nvec;
+
+ err = mana_gd_setup_irqs(pdev, nvec);
+ if (err) {
+ pci_free_irq_vectors(pdev);
+ return err;
+ }
+
+ gc->num_msix_usable = nvec;
+ gc->max_num_msix = nvec;
+
+ return 0;
+}
+
+static int mana_gd_setup_remaining_irqs(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ struct msi_map irq_map;
+ int max_irqs, i, err;
+
+ if (!pci_msix_can_alloc_dyn(pdev))
+		/* remaining IRQs were already allocated along with the HWC IRQ */
+ return 0;
+
+	/* allocate only the remaining IRQs */
+ max_irqs = gc->num_msix_usable - 1;
+
+ for (i = 1; i <= max_irqs; i++) {
+ irq_map = pci_msix_alloc_irq_at(pdev, i, NULL);
+ if (!irq_map.virq) {
+ err = irq_map.index;
+ /* caller will handle cleaning up all allocated
+ * irqs, after HWC is destroyed
+ */
+ return err;
+ }
+ }
+
+ err = mana_gd_setup_dyn_irqs(pdev, max_irqs);
+ if (err)
+ return err;
+
+ gc->max_num_msix = gc->max_num_msix + max_irqs;
+
+ return 0;
+}
+
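A worked example of the two-stage setup (illustrative numbers, not from the patch): on a VM with 16 online CPUs and dynamic MSI-X support, mana_gd_setup_hwc_irqs() allocates exactly one vector for the HWC; once the HWC is up, mana_gd_query_max_resources() sets num_msix_usable = min(max_msix, num_online_cpus() + 1), e.g. 17 when max_msix is large enough, and mana_gd_setup_remaining_irqs() adds the other 16 vectors at indices 1..16 via pci_msix_alloc_irq_at() before spreading them with irq_setup(). The first CPU sibling group is skipped only when num_msix_usable <= num_online_cpus(), i.e. when there is no spare vector and the HWC IRQ already occupies that group.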
static void mana_gd_remove_irqs(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
@@ -1411,19 +1787,21 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
if (irq < 0)
continue;
- gic = &gc->irq_contexts[i];
+ gic = xa_load(&gc->irq_contexts, i);
+ if (WARN_ON(!gic))
+ continue;
/* Need to clear the hint before free_irq */
irq_update_affinity_hint(irq, NULL);
free_irq(irq, gic);
+ xa_erase(&gc->irq_contexts, i);
+ kfree(gic);
}
pci_free_irq_vectors(pdev);
gc->max_num_msix = 0;
gc->num_msix_usable = 0;
- kfree(gc->irq_contexts);
- gc->irq_contexts = NULL;
}
static int mana_gd_setup(struct pci_dev *pdev)
@@ -1434,9 +1812,16 @@ static int mana_gd_setup(struct pci_dev *pdev)
mana_gd_init_registers(pdev);
mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
- err = mana_gd_setup_irqs(pdev);
- if (err)
- return err;
+ gc->service_wq = alloc_ordered_workqueue("gdma_service_wq", 0);
+ if (!gc->service_wq)
+ return -ENOMEM;
+
+ err = mana_gd_setup_hwc_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup IRQs for HWC creation: %d\n",
+ err);
+ goto free_workqueue;
+ }
err = mana_hwc_create_channel(gc);
if (err)
@@ -1450,16 +1835,26 @@ static int mana_gd_setup(struct pci_dev *pdev)
if (err)
goto destroy_hwc;
+ err = mana_gd_setup_remaining_irqs(pdev);
+ if (err) {
+ dev_err(gc->dev, "Failed to setup remaining IRQs: %d", err);
+ goto destroy_hwc;
+ }
+
err = mana_gd_detect_devices(pdev);
if (err)
goto destroy_hwc;
+ dev_dbg(&pdev->dev, "mana gdma setup successful\n");
return 0;
destroy_hwc:
mana_hwc_destroy_channel(gc);
remove_irq:
mana_gd_remove_irqs(pdev);
+free_workqueue:
+ destroy_workqueue(gc->service_wq);
+ dev_err(&pdev->dev, "%s failed (error %d)\n", __func__, err);
return err;
}
@@ -1470,6 +1865,9 @@ static void mana_gd_cleanup(struct pci_dev *pdev)
mana_hwc_destroy_channel(gc);
mana_gd_remove_irqs(pdev);
+
+ destroy_workqueue(gc->service_wq);
+ dev_dbg(&pdev->dev, "mana gdma cleanup successful\n");
}
static bool mana_is_pf(unsigned short dev_id)
@@ -1488,8 +1886,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE);
err = pci_enable_device(pdev);
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable pci device (err=%d)\n", err);
return -ENXIO;
+ }
pci_set_master(pdev);
@@ -1498,9 +1898,10 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto disable_dev;
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (err)
+ if (err) {
+ dev_err(&pdev->dev, "DMA set mask failed: %d\n", err);
goto release_region;
-
+ }
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
err = -ENOMEM;
@@ -1520,6 +1921,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->is_pf = mana_is_pf(pdev->device);
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
+ xa_init(&gc->irq_contexts);
if (gc->is_pf)
gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
@@ -1535,8 +1937,14 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto cleanup_gd;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ goto cleanup_mana;
+
return 0;
+cleanup_mana:
+ mana_remove(&gc->mana, false);
cleanup_gd:
mana_gd_cleanup(pdev);
unmap_bar:
@@ -1547,6 +1955,8 @@ unmap_bar:
* adapter-MTU file and apc->mana_pci_debugfs folder.
*/
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+ xa_destroy(&gc->irq_contexts);
pci_iounmap(pdev, bar0_va);
free_gc:
pci_set_drvdata(pdev, NULL);
@@ -1563,25 +1973,33 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, false);
mana_gd_cleanup(pdev);
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
+ xa_destroy(&gc->irq_contexts);
+
pci_iounmap(pdev, gc->bar0_va);
vfree(gc);
pci_release_regions(pdev);
pci_disable_device(pdev);
+
+ dev_dbg(&pdev->dev, "mana gdma remove successful\n");
}
/* The 'state' parameter is not used. */
-static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
@@ -1593,7 +2011,7 @@ static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
* fail -- if this happens, it's safer to just report an error than try to undo
* what has been done.
*/
-static int mana_gd_resume(struct pci_dev *pdev)
+int mana_gd_resume(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
int err;
@@ -1606,6 +2024,10 @@ static int mana_gd_resume(struct pci_dev *pdev)
if (err)
return err;
+ err = mana_rdma_probe(&gc->mana_ib);
+ if (err)
+ return err;
+
return 0;
}
@@ -1616,12 +2038,15 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
dev_info(&pdev->dev, "Shutdown was called\n");
+ mana_rdma_remove(&gc->mana_ib);
mana_remove(&gc->mana, true);
mana_gd_cleanup(pdev);
debugfs_remove_recursive(gc->mana_pci_debugfs);
+ gc->mana_pci_debugfs = NULL;
+
pci_disable_device(pdev);
}
@@ -1648,8 +2073,10 @@ static int __init mana_driver_init(void)
mana_debugfs_root = debugfs_create_dir("mana", NULL);
err = pci_register_driver(&mana_driver);
- if (err)
+ if (err) {
debugfs_remove(mana_debugfs_root);
+ mana_debugfs_root = NULL;
+ }
return err;
}
@@ -1659,6 +2086,8 @@ static void __exit mana_driver_exit(void)
pci_unregister_driver(&mana_driver);
debugfs_remove(mana_debugfs_root);
+
+ mana_debugfs_root = NULL;
}
module_init(mana_driver_init);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index a00f915c5188..ef072e24c46d 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2021, Microsoft Corporation. */
#include <net/mana/gdma.h>
+#include <net/mana/mana.h>
#include <net/mana/hw_channel.h>
#include <linux/vmalloc.h>
@@ -112,11 +113,13 @@ out:
static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
struct gdma_event *event)
{
+ union hwc_init_soc_service_type service_data;
struct hw_channel_context *hwc = ctx;
struct gdma_dev *gd = hwc->gdma_dev;
union hwc_init_type_data type_data;
union hwc_init_eq_id_db eq_db;
u32 type, val;
+ int ret;
switch (event->type) {
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
@@ -199,7 +202,24 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
}
break;
+ case GDMA_EQE_HWC_SOC_SERVICE:
+ service_data.as_uint32 = event->details[0];
+ type = service_data.type;
+ switch (type) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ ret = mana_rdma_service_event(gd->gdma_context, type);
+ if (ret)
+ dev_err(hwc->dev, "Failed to schedule adev service event: %d\n",
+ ret);
+ break;
+ default:
+ dev_warn(hwc->dev, "Received unknown SOC service type %u\n", type);
+ break;
+ }
+
+ break;
default:
dev_warn(hwc->dev, "Received unknown gdma event %u\n", event->type);
/* Ignore unknown events, which should never happen. */
@@ -440,7 +460,8 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
if (err) {
- dev_err(hwc->dev, "Failed to allocate DMA buffer: %d\n", err);
+ dev_err(hwc->dev, "Failed to allocate DMA buffer size: %u, err %d\n",
+ buf_size, err);
goto out;
}
@@ -529,6 +550,9 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
out:
if (err)
mana_hwc_destroy_wq(hwc, hwc_wq);
+
+ dev_err(hwc->dev, "Failed to create HWC queue size= %u type= %d err= %d\n",
+ queue_size, q_type, err);
return err;
}
@@ -856,7 +880,9 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
if (!wait_for_completion_timeout(&ctx->comp_event,
(msecs_to_jiffies(hwc->hwc_timeout)))) {
- dev_err(hwc->dev, "HWC: Request timed out!\n");
+ if (hwc->hwc_timeout != 0)
+ dev_err(hwc->dev, "HWC: Request timed out!\n");
+
err = -ETIMEDOUT;
goto out;
}
@@ -867,8 +893,13 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
- dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
- ctx->status_code);
+ if (ctx->status_code == GDMA_STATUS_CMD_UNSUPPORTED) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ if (req_msg->req.msg_type != MANA_QUERY_PHY_STAT)
+ dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
+ ctx->status_code);
err = -EPROTO;
goto out;
}
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 23b1521c0df9..d30721d4516f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -91,7 +91,7 @@ u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
goto out;
xdp_init_buff(xdp, PAGE_SIZE, &rxq->xdp_rxq);
- xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, false);
+ xdp_prepare_buff(xdp, buf_va, XDP_PACKET_HEADROOM, pkt_len, true);
act = bpf_prog_run_xdp(prog, xdp);
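With the third argument of xdp_prepare_buff() now true, the reserved headroom is exposed to XDP programs as a metadata area (and the mana_en.c change below carries it into the skb via skb_metadata_set()). A minimal, hypothetical XDP program using that area could look like the following libbpf-style sketch; it is not part of this patch and the names are made up:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_store_meta(struct xdp_md *ctx)
{
	__u32 *meta;

	/* Grow the metadata area by four bytes in front of the packet. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	meta = (void *)(long)ctx->data_meta;
	/* The verifier requires a bounds check against ctx->data. */
	if ((void *)(meta + 1) > (void *)(long)ctx->data)
		return XDP_PASS;

	*meta = 0xcafe;	/* e.g. a flow mark read later from the skb metadata */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";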
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index aa1e47233fe5..550843e2164b 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -10,9 +10,11 @@
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/export.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
+#include <net/netdev_lock.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>
@@ -46,16 +48,27 @@ static const struct file_operations mana_dbg_q_fops = {
.read = mana_dbg_q_read,
};
+static bool mana_en_need_log(struct mana_port_context *apc, int err)
+{
+ if (apc && apc->ac && apc->ac->gdma_dev &&
+ apc->ac->gdma_dev->gdma_context)
+ return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
+ else
+ return true;
+}
+
/* Microsoft Azure Network Adapter (MANA) functions */
static int mana_open(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
int err;
-
err = mana_alloc_queues(ndev);
- if (err)
+
+ if (err) {
+ netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
return err;
+ }
apc->port_is_up = true;
@@ -64,7 +77,7 @@ static int mana_open(struct net_device *ndev)
netif_carrier_on(ndev);
netif_tx_wake_all_queues(ndev);
-
+ netdev_dbg(ndev, "%s successful\n", __func__);
return 0;
}
@@ -176,6 +189,9 @@ static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
return 0;
frag_err:
+ if (net_ratelimit())
+ netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
+ skb->len);
for (i = sg_i - 1; i >= hsg; i--)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
@@ -245,10 +261,10 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct netdev_queue *net_txq;
struct mana_stats_tx *tx_stats;
struct gdma_queue *gdma_sq;
+ int err, len, num_gso_seg;
unsigned int csum_type;
struct mana_txq *txq;
struct mana_cq *cq;
- int err, len;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -256,6 +272,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (skb_cow_head(skb, MANA_HEADROOM))
goto tx_drop_count;
+ if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto tx_drop_count;
+
txq = &apc->tx_qp[txq_idx].txq;
gdma_sq = txq->gdma_sq;
cq = &apc->tx_qp[txq_idx].tx_cq;
@@ -398,6 +417,7 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_queue_tail(&txq->pending_skbs, skb);
len = skb->len;
+ num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
net_txq = netdev_get_tx_queue(ndev, txq_idx);
err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
@@ -422,10 +442,13 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* skb may be freed after mana_gd_post_work_request. Do not use it. */
skb = NULL;
+	/* Populate the packet and byte counters based on post-GSO packet
+	 * calculations
+ */
tx_stats = &txq->stats;
u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += len;
+ tx_stats->packets += num_gso_seg;
+ tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
u64_stats_update_end(&tx_stats->syncp);
tx_busy:
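A worked example with illustrative numbers (not from the patch): a TSO skb with len = 64,000 bytes, gso_hs = 54 header bytes and gso_segs = 45 is now accounted as 45 packets and 64,000 + 44 * 54 = 66,376 bytes, i.e. the on-the-wire bytes after segmentation, because every segment after the first repeats the 54-byte headers; before this change the same skb counted as a single packet of 64,000 bytes.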
@@ -652,30 +675,16 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
mpc->rxbpre_total = 0;
for (i = 0; i < num_rxb; i++) {
- if (mpc->rxbpre_alloc_size > PAGE_SIZE) {
- va = netdev_alloc_frag(mpc->rxbpre_alloc_size);
- if (!va)
- goto error;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) <
- get_order(mpc->rxbpre_alloc_size)) {
- put_page(page);
- goto error;
- }
- } else {
- page = dev_alloc_page();
- if (!page)
- goto error;
+ page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
+ if (!page)
+ goto error;
- va = page_to_virt(page);
- }
+ va = page_to_virt(page);
da = dma_map_single(dev, va + mpc->rxbpre_headroom,
mpc->rxbpre_datasize, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, da)) {
- put_page(virt_to_head_page(va));
+ put_page(page);
goto error;
}
@@ -687,6 +696,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_qu
return 0;
error:
+ netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
mana_pre_dealloc_rxbufs(mpc);
return -ENOMEM;
}
@@ -723,6 +733,78 @@ out:
return err;
}
+static int mana_shaper_set(struct net_shaper_binding *binding,
+ const struct net_shaper *shaper,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ u32 old_speed, rate;
+ int err;
+
+ if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
+ NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
+ return -EINVAL;
+ }
+
+ if (apc->handle.id && shaper->handle.id != apc->handle.id) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
+ return -EOPNOTSUPP;
+ }
+
+ if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
+ NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
+ return -EINVAL;
+ }
+
+ rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
+ rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
+
+ /* Get current speed */
+ err = mana_query_link_cfg(apc);
+ old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
+
+ if (!err) {
+ err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
+ apc->speed = (err) ? old_speed : rate;
+ apc->handle = (err) ? apc->handle : shaper->handle;
+ }
+
+ return err;
+}
+
+static int mana_shaper_del(struct net_shaper_binding *binding,
+ const struct net_shaper_handle *handle,
+ struct netlink_ext_ack *extack)
+{
+ struct mana_port_context *apc = netdev_priv(binding->netdev);
+ int err;
+
+ err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
+
+ if (!err) {
+ /* Reset mana port context parameters */
+ apc->handle.id = 0;
+ apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
+ apc->speed = 0;
+ }
+
+ return err;
+}
+
+static void mana_shaper_cap(struct net_shaper_binding *binding,
+ enum net_shaper_scope scope,
+ unsigned long *flags)
+{
+ *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
+ BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
+}
+
+static const struct net_shaper_ops mana_shaper_ops = {
+ .set = mana_shaper_set,
+ .delete = mana_shaper_del,
+ .capabilities = mana_shaper_cap,
+};
+
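A worked example with illustrative values: setting a shaper with bw_max = 2,500,000,000 (2.5 Gbit/s) passes the multiple-of-100-Mbit/s check (2,500,000,000 % 100,000,000 == 0) and is converted to rate = 2,500,000,000 / 1000 / 1000 = 2500 Mbit/s, which is what mana_set_bw_clamp() receives; a bw_max of 2,550,000,000 would be rejected with -EINVAL.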
static const struct net_device_ops mana_devops = {
.ndo_open = mana_open,
.ndo_stop = mana_close,
@@ -733,17 +815,17 @@ static const struct net_device_ops mana_devops = {
.ndo_bpf = mana_bpf,
.ndo_xdp_xmit = mana_xdp_xmit,
.ndo_change_mtu = mana_change_mtu,
+ .net_shaper_ops = &mana_shaper_ops,
};
static void mana_cleanup_port_context(struct mana_port_context *apc)
{
/*
- * at this point all dir/files under the vport directory
- * are already cleaned up.
- * We are sure the apc->mana_port_debugfs remove will not
- * cause any freed memory access issues
+	 * Make sure subsequent cleanup attempts don't end up removing an
+	 * already-removed dentry pointer
*/
debugfs_remove(apc->mana_port_debugfs);
+ apc->mana_port_debugfs = NULL;
kfree(apc->rxqs);
apc->rxqs = NULL;
}
@@ -779,8 +861,13 @@ static int mana_send_request(struct mana_context *ac, void *in_buf,
err = mana_gd_send_request(gc, in_len, in_buf, out_len,
out_buf);
if (err || resp->status) {
- dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
- err, resp->status);
+ if (err == -EOPNOTSUPP)
+ return err;
+
+ if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
+ mana_need_log(gc, err))
+ dev_err(dev, "Failed to send mana message: %d, 0x%x\n",
+ err, resp->status);
return err ? err : -EPROTO;
}
@@ -855,8 +942,10 @@ static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
+ err);
+
return;
}
@@ -911,8 +1000,10 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
- err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
+ err);
+
return;
}
@@ -926,7 +1017,7 @@ static void mana_pf_deregister_filter(struct mana_port_context *apc)
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
u32 proto_minor_ver, u32 proto_micro_ver,
- u16 *max_num_vports)
+ u16 *max_num_vports, u8 *bm_hostmode)
{
struct gdma_context *gc = ac->gdma_dev->gdma_context;
struct mana_query_device_cfg_resp resp = {};
@@ -937,7 +1028,7 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
sizeof(req), sizeof(resp));
- req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.proto_major_ver = proto_major_ver;
req.proto_minor_ver = proto_minor_ver;
@@ -961,11 +1052,16 @@ static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
*max_num_vports = resp.max_num_vports;
- if (resp.hdr.response.msg_version == GDMA_MESSAGE_V2)
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
gc->adapter_mtu = resp.adapter_mtu;
else
gc->adapter_mtu = ETH_FRAME_LEN;
+ if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
+ *bm_hostmode = resp.bm_hostmode;
+ else
+ *bm_hostmode = 0;
+
debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
return 0;
@@ -1137,7 +1233,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
+
goto out;
}
@@ -1161,6 +1259,95 @@ out:
return err;
}
+int mana_query_link_cfg(struct mana_port_context *apc)
+{
+ struct net_device *ndev = apc->ndev;
+ struct mana_query_link_config_resp resp = {};
+ struct mana_query_link_config_req req = {};
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(req), sizeof(resp));
+
+ req.vport = apc->port_handle;
+ req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to query link config: %d\n", err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured) {
+ err = -EINVAL;
+ return err;
+ }
+ apc->speed = resp.link_speed_mbps;
+ apc->max_speed = resp.qos_speed_mbps;
+ return 0;
+}
+
+int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
+ int enable_clamping)
+{
+ struct mana_set_bw_clamp_resp resp = {};
+ struct mana_set_bw_clamp_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
+ sizeof(req), sizeof(resp));
+ req.vport = apc->port_handle;
+ req.link_speed_mbps = speed;
+ req.enable_clamping = enable_clamping;
+
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+
+ if (err) {
+ if (err == -EOPNOTSUPP) {
+ netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
+ return err;
+ }
+ netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d",
+ speed, err);
+ return err;
+ }
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
+ sizeof(resp));
+
+ if (err || resp.hdr.status) {
+ netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
+ resp.hdr.status);
+ if (!err)
+ err = -EOPNOTSUPP;
+ return err;
+ }
+
+ if (resp.qos_unconfigured)
+ netdev_info(ndev, "QoS is unconfigured\n");
+
+ return 0;
+}
+
int mana_create_wq_obj(struct mana_port_context *apc,
mana_handle_t vport,
u32 wq_type, struct mana_obj_spec *wq_spec,
@@ -1232,7 +1419,9 @@ void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
sizeof(resp));
if (err) {
- netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+ if (mana_en_need_log(apc, err))
+ netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
+
return;
}
@@ -1254,6 +1443,7 @@ static void mana_destroy_eq(struct mana_context *ac)
return;
debugfs_remove_recursive(ac->mana_eqs_debugfs);
+ ac->mana_eqs_debugfs = NULL;
for (i = 0; i < gc->max_num_queues; i++) {
eq = ac->eqs[i].eq;
@@ -1304,8 +1494,10 @@ static int mana_create_eq(struct mana_context *ac)
for (i = 0; i < gc->max_num_queues; i++) {
spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
- if (err)
+ if (err) {
+ dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
goto out;
+ }
mana_create_eq_debugfs(ac, i);
}
@@ -1547,8 +1739,12 @@ static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
return NULL;
if (xdp->data_hard_start) {
+ u32 metasize = xdp->data - xdp->data_meta;
+
skb_reserve(skb, xdp->data - xdp->data_hard_start);
skb_put(skb, xdp->data_end - xdp->data);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
return skb;
}
@@ -1660,7 +1856,7 @@ drop:
}
static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
- dma_addr_t *da, bool *from_pool, bool is_napi)
+ dma_addr_t *da, bool *from_pool)
{
struct page *page;
void *va;
@@ -1671,21 +1867,6 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
if (rxq->xdp_save_va) {
va = rxq->xdp_save_va;
rxq->xdp_save_va = NULL;
- } else if (rxq->alloc_size > PAGE_SIZE) {
- if (is_napi)
- va = napi_alloc_frag(rxq->alloc_size);
- else
- va = netdev_alloc_frag(rxq->alloc_size);
-
- if (!va)
- return NULL;
-
- page = virt_to_head_page(va);
- /* Check if the frag falls back to single page */
- if (compound_order(page) < get_order(rxq->alloc_size)) {
- put_page(page);
- return NULL;
- }
} else {
page = page_pool_dev_alloc_pages(rxq->page_pool);
if (!page)
@@ -1718,7 +1899,7 @@ static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
dma_addr_t da;
void *va;
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, true);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return;
@@ -1914,12 +2095,15 @@ static void mana_destroy_txq(struct mana_port_context *apc)
for (i = 0; i < apc->num_queues; i++) {
debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
+ apc->tx_qp[i].mana_tx_debugfs = NULL;
napi = &apc->tx_qp[i].tx_cq.napi;
if (apc->tx_qp[i].txq.napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
apc->tx_qp[i].txq.napi_initialized = false;
}
mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
@@ -2071,8 +2255,11 @@ static int mana_create_txq(struct mana_port_context *apc,
mana_create_txq_debugfs(apc, i);
- netif_napi_add_tx(net, &cq->napi, mana_poll);
- napi_enable(&cq->napi);
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
+ netdev_lock_ops_to_full(net);
+ netif_napi_add_locked(net, &cq->napi, mana_poll);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(net);
txq->napi_initialized = true;
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
@@ -2080,6 +2267,8 @@ static int mana_create_txq(struct mana_port_context *apc,
return 0;
out:
+ netdev_err(net, "Failed to create %d TX queues, %d\n",
+ apc->num_queues, err);
mana_destroy_txq(apc);
return err;
}
@@ -2099,15 +2288,17 @@ static void mana_destroy_rxq(struct mana_port_context *apc,
return;
debugfs_remove_recursive(rxq->mana_rx_debugfs);
+ rxq->mana_rx_debugfs = NULL;
napi = &rxq->rx_cq.napi;
if (napi_initialized) {
napi_synchronize(napi);
- napi_disable(napi);
-
- netif_napi_del(napi);
+ netdev_lock_ops_to_full(napi->dev);
+ napi_disable_locked(napi);
+ netif_napi_del_locked(napi);
+ netdev_unlock_full_to_ops(napi->dev);
}
xdp_rxq_info_unreg(&rxq->xdp_rxq);
@@ -2156,7 +2347,7 @@ static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
if (mpc->rxbufs_pre)
va = mana_get_rxbuf_pre(rxq, &da);
else
- va = mana_get_rxfrag(rxq, dev, &da, &from_pool, false);
+ va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
if (!va)
return -ENOMEM;
@@ -2242,6 +2433,7 @@ static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
pprm.nid = gc->numa_node;
pprm.napi = &rxq->rx_cq.napi;
pprm.netdev = rxq->ndev;
+ pprm.order = get_order(rxq->alloc_size);
rxq->page_pool = page_pool_create(&pprm);
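As a rough example (numbers are illustrative): with a 9000-byte MTU the RX buffer plus headroom no longer fits in a 4 KiB page, so get_order(rxq->alloc_size) evaluates to 2 and the pool hands out 16 KiB compound pages, matching the dev_alloc_pages()-based pre-allocation earlier in this patch; with the default 1500-byte MTU the order remains 0.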
@@ -2357,14 +2549,18 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
- netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1);
+ netdev_lock_ops_to_full(ndev);
+ netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
+ netdev_unlock_full_to_ops(ndev);
WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
cq->napi.napi_id));
WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
rxq->page_pool));
- napi_enable(&cq->napi);
+ netdev_lock_ops_to_full(ndev);
+ napi_enable_locked(&cq->napi);
+ netdev_unlock_full_to_ops(ndev);
mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
out:
@@ -2415,6 +2611,7 @@ static int mana_add_rx_queues(struct mana_port_context *apc,
rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
if (!rxq) {
err = -ENOMEM;
+ netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
goto out;
}
@@ -2448,7 +2645,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
mana_destroy_txq(apc);
mana_uncfg_vport(apc);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_hw_vport(apc);
}
@@ -2460,7 +2657,7 @@ static int mana_create_vport(struct mana_port_context *apc,
apc->default_rxobj = INVALID_MANA_HANDLE;
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_hw_vport(apc);
if (err)
return err;
@@ -2613,6 +2810,88 @@ void mana_query_gf_stats(struct mana_port_context *apc)
apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
}
+void mana_query_phy_stats(struct mana_port_context *apc)
+{
+ struct mana_query_phy_stat_resp resp = {};
+ struct mana_query_phy_stat_req req = {};
+ struct net_device *ndev = apc->ndev;
+ int err;
+
+ mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(req), sizeof(resp));
+ err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+ sizeof(resp));
+ if (err)
+ return;
+
+ err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
+ sizeof(resp));
+ if (err || resp.hdr.status) {
+ netdev_err(ndev,
+ "Failed to query PHY stats: %d, resp:0x%x\n",
+ err, resp.hdr.status);
+ return;
+ }
+
+ /* Aggregate drop counters */
+ apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
+ apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
+
+ /* Per TC traffic Counters */
+ apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
+ apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
+ apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
+ apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
+ apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
+ apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
+ apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
+ apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
+ apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
+ apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
+ apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
+ apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
+ apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
+ apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
+ apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
+ apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
+
+ /* Per TC byte Counters */
+ apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
+ apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
+ apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
+ apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
+ apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
+ apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
+ apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
+ apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
+ apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
+ apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
+ apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
+ apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
+ apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
+ apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
+ apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
+ apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
+
+ /* Per TC pause Counters */
+ apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
+ apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
+ apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
+ apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
+ apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
+ apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
+ apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
+ apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
+ apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
+ apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
+ apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
+ apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
+ apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
+ apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
+ apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
+ apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
+}
+
static int mana_init_port(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
@@ -2661,12 +2940,18 @@ int mana_alloc_queues(struct net_device *ndev)
int err;
err = mana_create_vport(apc, ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
return err;
+ }
err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
err = mana_add_rx_queues(apc, ndev);
if (err)
@@ -2675,16 +2960,22 @@ int mana_alloc_queues(struct net_device *ndev)
apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
- if (err)
+ if (err) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
+ apc->num_queues, err);
goto destroy_vport;
+ }
mana_rss_table_init(apc);
err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
- if (err)
+ if (err) {
+ netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
goto destroy_vport;
+ }
- if (gd->gdma_context->is_pf) {
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
err = mana_pf_register_filter(apc);
if (err)
goto destroy_vport;
@@ -2746,7 +3037,7 @@ static int mana_dealloc_queues(struct net_device *ndev)
mana_chn_setxdp(apc, NULL);
- if (gd->gdma_context->is_pf)
+ if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
mana_pf_deregister_filter(apc);
/* No packet can be transmitted now since apc->port_is_up is false.
@@ -2795,11 +3086,10 @@ static int mana_dealloc_queues(struct net_device *ndev)
apc->rss_state = TRI_STATE_FALSE;
err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
- if (err) {
+ if (err && mana_en_need_log(apc, err))
netdev_err(ndev, "Failed to disable vPort: %d\n", err);
- return err;
- }
+ /* Even in the error case, the vPort still needs to be cleaned up */
mana_destroy_vport(apc);
return 0;
@@ -2823,8 +3113,10 @@ int mana_detach(struct net_device *ndev, bool from_close)
if (apc->port_st_save) {
err = mana_dealloc_queues(ndev);
- if (err)
+ if (err) {
+ netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
return err;
+ }
}
if (!from_close) {
@@ -2873,6 +3165,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
ndev->dev_port = port_idx;
SET_NETDEV_DEV(ndev, gc->dev);
+ netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
+
netif_carrier_off(ndev);
netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
@@ -2904,6 +3198,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
goto free_indir;
}
+ debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
+
return 0;
free_indir:
@@ -2936,7 +3232,7 @@ static void remove_adev(struct gdma_dev *gd)
gd->adev = NULL;
}
-static int add_adev(struct gdma_dev *gd)
+static int add_adev(struct gdma_dev *gd, const char *name)
{
struct auxiliary_device *adev;
struct mana_adev *madev;
@@ -2952,7 +3248,7 @@ static int add_adev(struct gdma_dev *gd)
goto idx_fail;
adev->id = ret;
- adev->name = "rdma";
+ adev->name = name;
adev->dev.parent = gd->gdma_context->dev;
adev->dev.release = adev_release;
madev->mdev = gd;
@@ -2968,6 +3264,8 @@ static int add_adev(struct gdma_dev *gd)
goto add_fail;
gd->adev = adev;
+ dev_dbg(gd->gdma_context->dev,
+ "Auxiliary device added successfully\n");
return 0;
add_fail:
@@ -2982,11 +3280,76 @@ idx_fail:
return ret;
}
+static void mana_rdma_service_handle(struct work_struct *work)
+{
+ struct mana_service_work *serv_work =
+ container_of(work, struct mana_service_work, work);
+ struct gdma_dev *gd = serv_work->gdma_dev;
+ struct device *dev = gd->gdma_context->dev;
+ int ret;
+
+ if (READ_ONCE(gd->rdma_teardown))
+ goto out;
+
+ switch (serv_work->event) {
+ case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
+ if (!gd->adev || gd->is_suspended)
+ break;
+
+ remove_adev(gd);
+ gd->is_suspended = true;
+ break;
+
+ case GDMA_SERVICE_TYPE_RDMA_RESUME:
+ if (!gd->is_suspended)
+ break;
+
+ ret = add_adev(gd, "rdma");
+ if (ret)
+ dev_err(dev, "Failed to add adev on resume: %d\n", ret);
+ else
+ gd->is_suspended = false;
+ break;
+
+ default:
+ dev_warn(dev, "unknown adev service event %u\n",
+ serv_work->event);
+ break;
+ }
+
+out:
+ kfree(serv_work);
+}
+
+int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
+{
+ struct gdma_dev *gd = &gc->mana_ib;
+ struct mana_service_work *serv_work;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device was not detected on PCI */
+ return 0;
+ }
+
+ serv_work = kzalloc(sizeof(*serv_work), GFP_ATOMIC);
+ if (!serv_work)
+ return -ENOMEM;
+
+ serv_work->event = event;
+ serv_work->gdma_dev = gd;
+
+ INIT_WORK(&serv_work->work, mana_rdma_service_handle);
+ queue_work(gc->service_wq, &serv_work->work);
+
+ return 0;
+}
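A minimal usage sketch of the deferred service path above, under the assumption of a hypothetical caller (example_handle_rdma_state is not part of the driver): queueing a suspend/resume event returns immediately, and the actual adev remove/add runs later from mana_rdma_service_handle() on gc->service_wq.

/* Hypothetical caller sketch: request an RDMA suspend or resume. */
static void example_handle_rdma_state(struct gdma_context *gc, bool suspend)
{
	enum gdma_service_type ev = suspend ? GDMA_SERVICE_TYPE_RDMA_SUSPEND :
					      GDMA_SERVICE_TYPE_RDMA_RESUME;
	int err;

	/* Only allocates and queues work; no device teardown happens here. */
	err = mana_rdma_service_event(gc, ev);
	if (err)
		dev_err(gc->dev, "Failed to queue RDMA service event: %d\n", err);
}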
+
int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
+ u8 bm_hostmode = 0;
u16 num_ports = 0;
int err;
int i;
@@ -3009,14 +3372,18 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
err = mana_create_eq(ac);
- if (err)
+ if (err) {
+ dev_err(dev, "Failed to create EQs: %d\n", err);
goto out;
+ }
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &num_ports);
+ MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
if (err)
goto out;
+ ac->bm_hostmode = bm_hostmode;
+
if (!resuming) {
ac->num_ports = num_ports;
} else {
@@ -3064,10 +3431,16 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
}
}
- err = add_adev(gd);
+ err = add_adev(gd, "eth");
out:
- if (err)
+ if (err) {
mana_remove(gd, false);
+ } else {
+ dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
+ gd, gd->dev_id.as_uint32, ac->num_ports,
+ gd->dev_id.type, gd->dev_id.instance);
+ dev_dbg(dev, "%s succeeded\n", __func__);
+ }
return err;
}
@@ -3129,23 +3502,68 @@ out:
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
+ dev_dbg(dev, "%s succeeded\n", __func__);
}
-struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+int mana_rdma_probe(struct gdma_dev *gd)
+{
+ int err = 0;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device was not detected on PCI */
+ return err;
+ }
+
+ err = mana_gd_register_device(gd);
+ if (err)
+ return err;
+
+ err = add_adev(gd, "rdma");
+ if (err)
+ mana_gd_deregister_device(gd);
+
+ return err;
+}
+
+void mana_rdma_remove(struct gdma_dev *gd)
+{
+ struct gdma_context *gc = gd->gdma_context;
+
+ if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
+ /* RDMA device was not detected on PCI */
+ return;
+ }
+
+ WRITE_ONCE(gd->rdma_teardown, true);
+ flush_workqueue(gc->service_wq);
+
+ if (gd->adev)
+ remove_adev(gd);
+
+ mana_gd_deregister_device(gd);
+}
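A minimal sketch of how the probe/remove pair above might be driven, assuming hypothetical glue helpers (example_rdma_up/example_rdma_down): mana_rdma_remove() sets rdma_teardown and flushes gc->service_wq before dropping the adev, so no queued suspend/resume work can re-add the device mid-teardown.

/* Hypothetical glue sketch: bring the RDMA auxiliary device up and down. */
static int example_rdma_up(struct gdma_context *gc)
{
	/* Returns 0 without doing anything if no GDMA_DEVICE_MANA_IB was listed. */
	return mana_rdma_probe(&gc->mana_ib);
}

static void example_rdma_down(struct gdma_context *gc)
{
	/* Also a no-op without an IB device; safe to call unconditionally. */
	mana_rdma_remove(&gc->mana_ib);
}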
+
+struct net_device *mana_get_primary_netdev(struct mana_context *ac,
+ u32 port_index,
+ netdevice_tracker *tracker)
{
struct net_device *ndev;
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "Taking primary netdev without holding the RCU read lock");
if (port_index >= ac->num_ports)
return NULL;
- /* When mana is used in netvsc, the upper netdevice should be returned. */
- if (ac->ports[port_index]->flags & IFF_SLAVE)
- ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
- else
+ rcu_read_lock();
+
+ /* If mana is used in netvsc, the upper netdevice should be returned. */
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+
+ /* If there is no upper device, use the parent Ethernet device */
+ if (!ndev)
ndev = ac->ports[port_index];
+ netdev_hold(ndev, tracker, GFP_ATOMIC);
+ rcu_read_unlock();
+
return ndev;
}
-EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, "NET_MANA");
+EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
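A minimal consumer sketch for the tracker-based accessor above, with a hypothetical caller (example_use_primary_netdev): the returned netdev is held against the caller-supplied netdevice_tracker and must be released with netdev_put() when no longer needed.

/* Hypothetical consumer sketch: hold, use and release the primary netdev. */
static int example_use_primary_netdev(struct mana_context *ac, u32 port)
{
	netdevice_tracker tracker;
	struct net_device *ndev;

	ndev = mana_get_primary_netdev(ac, port, &tracker);
	if (!ndev)
		return -ENODEV;

	netdev_info(ndev, "primary netdev for port %u\n", port);

	netdev_put(ndev, &tracker);	/* drop the reference taken above */
	return 0;
}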
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c419626073f5..a1afa75a9463 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -7,10 +7,12 @@
#include <net/mana/mana.h>
-static const struct {
+struct mana_stats_desc {
char name[ETH_GSTRING_LEN];
u16 offset;
-} mana_eth_stats[] = {
+};
+
+static const struct mana_stats_desc mana_eth_stats[] = {
{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
@@ -75,6 +77,59 @@ static const struct {
rx_cqe_unknown_type)},
};
+static const struct mana_stats_desc mana_phy_stats[] = {
+ { "hc_rx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_drop_phy) },
+ { "hc_tx_pkt_drop_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_drop_phy) },
+ { "hc_tc0_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc0_phy) },
+ { "hc_tc0_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc0_phy) },
+ { "hc_tc0_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc0_phy) },
+ { "hc_tc0_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc0_phy) },
+ { "hc_tc1_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc1_phy) },
+ { "hc_tc1_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc1_phy) },
+ { "hc_tc1_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc1_phy) },
+ { "hc_tc1_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc1_phy) },
+ { "hc_tc2_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc2_phy) },
+ { "hc_tc2_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc2_phy) },
+ { "hc_tc2_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc2_phy) },
+ { "hc_tc2_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc2_phy) },
+ { "hc_tc3_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc3_phy) },
+ { "hc_tc3_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc3_phy) },
+ { "hc_tc3_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc3_phy) },
+ { "hc_tc3_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc3_phy) },
+ { "hc_tc4_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc4_phy) },
+ { "hc_tc4_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc4_phy) },
+ { "hc_tc4_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc4_phy) },
+ { "hc_tc4_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc4_phy) },
+ { "hc_tc5_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc5_phy) },
+ { "hc_tc5_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc5_phy) },
+ { "hc_tc5_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc5_phy) },
+ { "hc_tc5_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc5_phy) },
+ { "hc_tc6_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc6_phy) },
+ { "hc_tc6_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc6_phy) },
+ { "hc_tc6_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc6_phy) },
+ { "hc_tc6_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc6_phy) },
+ { "hc_tc7_rx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, rx_pkt_tc7_phy) },
+ { "hc_tc7_rx_byte_phy", offsetof(struct mana_ethtool_phy_stats, rx_byte_tc7_phy) },
+ { "hc_tc7_tx_pkt_phy", offsetof(struct mana_ethtool_phy_stats, tx_pkt_tc7_phy) },
+ { "hc_tc7_tx_byte_phy", offsetof(struct mana_ethtool_phy_stats, tx_byte_tc7_phy) },
+ { "hc_tc0_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc0_phy) },
+ { "hc_tc0_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc0_phy) },
+ { "hc_tc1_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc1_phy) },
+ { "hc_tc1_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc1_phy) },
+ { "hc_tc2_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc2_phy) },
+ { "hc_tc2_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc2_phy) },
+ { "hc_tc3_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc3_phy) },
+ { "hc_tc3_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc3_phy) },
+ { "hc_tc4_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc4_phy) },
+ { "hc_tc4_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc4_phy) },
+ { "hc_tc5_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc5_phy) },
+ { "hc_tc5_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc5_phy) },
+ { "hc_tc6_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc6_phy) },
+ { "hc_tc6_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc6_phy) },
+ { "hc_tc7_rx_pause_phy", offsetof(struct mana_ethtool_phy_stats, rx_pause_tc7_phy) },
+ { "hc_tc7_tx_pause_phy", offsetof(struct mana_ethtool_phy_stats, tx_pause_tc7_phy) },
+};
+
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
struct mana_port_context *apc = netdev_priv(ndev);
@@ -83,8 +138,8 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
if (stringset != ETH_SS_STATS)
return -EINVAL;
- return ARRAY_SIZE(mana_eth_stats) + num_queues *
- (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
+ return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+ num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
@@ -99,6 +154,9 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
ethtool_puts(&data, mana_eth_stats[i].name);
+ for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
+ ethtool_puts(&data, mana_phy_stats[i].name);
+
for (i = 0; i < num_queues; i++) {
ethtool_sprintf(&data, "rx_%d_packets", i);
ethtool_sprintf(&data, "rx_%d_bytes", i);
@@ -128,6 +186,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
struct mana_port_context *apc = netdev_priv(ndev);
unsigned int num_queues = apc->num_queues;
void *eth_stats = &apc->eth_stats;
+ void *phy_stats = &apc->phy_stats;
struct mana_stats_rx *rx_stats;
struct mana_stats_tx *tx_stats;
unsigned int start;
@@ -151,9 +210,18 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
/* we call mana function to update stats from GDMA */
mana_query_gf_stats(apc);
+ /* Query the phy stats from GDMA. These include the aggregate tx/rx
+ * drop counters and the per-TC (Traffic Class) tx/rx packet, byte and
+ * pause counters.
+ */
+ mana_query_phy_stats(apc);
+
for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
+ for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
+ data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
+
for (q = 0; q < num_queues; q++) {
rx_stats = &apc->rxqs[q]->stats;
@@ -427,6 +495,12 @@ out:
static int mana_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
+ struct mana_port_context *apc = netdev_priv(ndev);
+ int err;
+
+ err = mana_query_link_cfg(apc);
+ cmd->base.speed = (err) ? SPEED_UNKNOWN : apc->max_speed;
+
cmd->base.duplex = DUPLEX_FULL;
cmd->base.port = PORT_OTHER;