Diffstat (limited to 'drivers')
75 files changed, 1967 insertions, 704 deletions
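A pattern worth flagging before the hunks: the first dmaengine change below fixes a wrong-pool mempool_free(). The pool is selected by a count that, by free time, no longer matches the allocation-time value, so the fix records map_cnt in the object at allocation and re-reads it just before freeing. A minimal standalone sketch of that record-then-reread pattern, with hypothetical pool helpers standing in for the kernel's mempool API:

#include <stdlib.h>

/* map a map count to one of several size-classed pools (assumed classes) */
static int size_class(int nr)
{
	return nr <= 2 ? 0 : nr <= 16 ? 1 : 2;
}

struct unmap_data {
	int map_cnt;		/* size class, recorded at allocation time */
	/* per-entry addresses and lengths would follow */
};

/* stand-ins for mempool_alloc()/mempool_free() on a per-class pool */
static struct unmap_data *pool_alloc(int cls) { (void)cls; return malloc(sizeof(struct unmap_data)); }
static void pool_free(int cls, struct unmap_data *u) { (void)cls; free(u); }

struct unmap_data *get_unmap_data(int nr)
{
	struct unmap_data *u = pool_alloc(size_class(nr));
	if (u)
		u->map_cnt = nr;	/* remember which pool we came from */
	return u;
}

void put_unmap_data(struct unmap_data *u)
{
	/* re-read the recorded count: a copy mutated by the unmap loop
	 * would return @u to the wrong pool, which is the bug fixed below */
	int cnt = u->map_cnt;
	pool_free(size_class(cnt), u);
}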
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a886713937fd..d5d30ed863ce 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1009,6 +1009,7 @@ static void dmaengine_unmap(struct kref *kref) dma_unmap_page(dev, unmap->addr[i], unmap->len, DMA_BIDIRECTIONAL); } + cnt = unmap->map_cnt; mempool_free(unmap, __get_unmap_pool(cnt)->pool); } @@ -1074,6 +1075,7 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) memset(unmap, 0, sizeof(*unmap)); kref_init(&unmap->kref); unmap->dev = dev; + unmap->map_cnt = nr; return unmap; } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index cfdbb92aae1d..7a740769c2fa 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1548,11 +1548,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* Disable BLOCK interrupts as well */ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); - err = devm_request_irq(chip->dev, chip->irq, dw_dma_interrupt, - IRQF_SHARED, "dw_dmac", dw); - if (err) - return err; - /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev, sizeof(struct dw_desc), 4, 0); @@ -1563,6 +1558,11 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); + err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED, + "dw_dmac", dw); + if (err) + return err; + INIT_LIST_HEAD(&dw->dma.channels); for (i = 0; i < nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; @@ -1667,6 +1667,7 @@ int dw_dma_remove(struct dw_dma_chip *chip) dw_dma_off(dw); dma_async_device_unregister(&dw->dma); + free_irq(chip->irq, dw); tasklet_kill(&dw->tasklet); list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 766b68ed505c..394cbc5c93e3 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -191,12 +191,10 @@ static void mv_set_mode(struct mv_xor_chan *chan, static void mv_chan_activate(struct mv_xor_chan *chan) { - u32 activation; - dev_dbg(mv_chan_to_devp(chan), " activate chan.\n"); - activation = readl_relaxed(XOR_ACTIVATION(chan)); - activation |= 0x1; - writel_relaxed(activation, XOR_ACTIVATION(chan)); + + /* writel ensures all descriptors are flushed before activation */ + writel(BIT(0), XOR_ACTIVATION(chan)); } static char mv_chan_is_busy(struct mv_xor_chan *chan) diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ab26d46bbe15..5ebdfbc1051e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -113,11 +113,9 @@ struct sa11x0_dma_phy { struct sa11x0_dma_desc *txd_load; unsigned sg_done; struct sa11x0_dma_desc *txd_done; -#ifdef CONFIG_PM_SLEEP u32 dbs[2]; u32 dbt[2]; u32 dcsr; -#endif }; struct sa11x0_dma_dev { @@ -984,7 +982,6 @@ static int sa11x0_dma_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP static int sa11x0_dma_suspend(struct device *dev) { struct sa11x0_dma_dev *d = dev_get_drvdata(dev); @@ -1054,7 +1051,6 @@ static int sa11x0_dma_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops sa11x0_dma_pm_ops = { .suspend_noirq = sa11x0_dma_suspend, diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c index 7762665ad8fd..876de9ac3793 100644 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c @@ -1009,7 +1009,7 @@ 
exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, } if (outp == 8) - return false; + return conf; data = exec_lookup(priv, head, outp, ctrl, dcb, &ver, &hdr, &cnt, &len, &info1); if (data == 0x0000) diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c index 43fec17ea540..bbf117be572f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/therm/nvd0.c @@ -40,6 +40,7 @@ pwm_info(struct nouveau_therm *therm, int line) case 0x00: return 2; case 0x19: return 1; case 0x1c: return 0; + case 0x1e: return 2; default: break; } diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 68528619834a..8149e7cf4303 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1642,6 +1642,7 @@ struct radeon_vce { unsigned fb_version; atomic_t handles[RADEON_MAX_VCE_HANDLES]; struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; + unsigned img_size[RADEON_MAX_VCE_HANDLES]; struct delayed_work idle_work; }; @@ -1655,7 +1656,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, uint32_t handle, struct radeon_fence **fence); void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp); void radeon_vce_note_usage(struct radeon_device *rdev); -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi); +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, unsigned size); int radeon_vce_cs_parse(struct radeon_cs_parser *p); bool radeon_vce_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, @@ -2640,7 +2641,8 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE8(rdev) ((rdev->family >= CHIP_BONAIRE)) #define ASIC_IS_DCE81(rdev) ((rdev->family == CHIP_KAVERI)) #define ASIC_IS_DCE82(rdev) ((rdev->family == CHIP_BONAIRE)) -#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI)) +#define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ + (rdev->family == CHIP_MULLINS)) #define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ (rdev->ddev->pdev->device == 0x6850) || \ diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index b3633d9a5317..9ab30976287d 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -196,6 +196,20 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } } + if (!found) { + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + dhandle = ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } + } + if (!found) return false; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 408b6ac53f0b..f00dbbf4d806 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -999,7 +999,7 @@ void radeon_compute_pll_avivo(struct radeon_pll *pll, /* avoid high jitter with small fractional dividers */ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) { - fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60); + fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 50); if (fb_div < fb_div_min) { unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div); fb_div *= tmp; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 0cc47f12d995..eaaedba04675 
100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -577,28 +577,29 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) return r; } - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; - } + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } - /* map the ib pool buffer read only into - * virtual address space */ - bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, - rdev->ring_tmp_bo.bo); - r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, - RADEON_VM_PAGE_READABLE | - RADEON_VM_PAGE_SNOOPED); + /* map the ib pool buffer read only into + * virtual address space */ + bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, + rdev->ring_tmp_bo.bo); + r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET, + RADEON_VM_PAGE_READABLE | + RADEON_VM_PAGE_SNOOPED); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); - if (r) { - radeon_vm_fini(rdev, &fpriv->vm); - kfree(fpriv); - return r; + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (r) { + radeon_vm_fini(rdev, &fpriv->vm); + kfree(fpriv); + return r; + } } - file_priv->driver_priv = fpriv; } @@ -626,13 +627,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev, struct radeon_bo_va *bo_va; int r; - r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); - if (!r) { - bo_va = radeon_vm_bo_find(&fpriv->vm, - rdev->ring_tmp_bo.bo); - if (bo_va) - radeon_vm_bo_rmv(rdev, bo_va); - radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + if (rdev->accel_working) { + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); + if (!r) { + bo_va = radeon_vm_bo_find(&fpriv->vm, + rdev->ring_tmp_bo.bo); + if (bo_va) + radeon_vm_bo_rmv(rdev, bo_va); + radeon_bo_unreserve(rdev->ring_tmp_bo.bo); + } } radeon_vm_fini(rdev, &fpriv->vm); diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 19bec0dbfa38..4faa4d6f9bb4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -458,7 +458,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, * into account. We don't want to disallow buffer moves * completely. */ - if (current_domain != RADEON_GEM_DOMAIN_CPU && + if ((lobj->alt_domain & current_domain) != 0 && (domain & current_domain) == 0 && /* will be moved */ bytes_moved > bytes_moved_threshold) { /* don't move it */ @@ -699,22 +699,30 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) rbo = container_of(bo, struct radeon_bo, tbo); radeon_bo_check_tiling(rbo, 0, 0); rdev = rbo->rdev; - if (bo->mem.mem_type == TTM_PL_VRAM) { - size = bo->mem.num_pages << PAGE_SHIFT; - offset = bo->mem.start << PAGE_SHIFT; - if ((offset + size) > rdev->mc.visible_vram_size) { - /* hurrah the memory is not visible ! */ - radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); - rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; - r = ttm_bo_validate(bo, &rbo->placement, false, false); - if (unlikely(r != 0)) - return r; - offset = bo->mem.start << PAGE_SHIFT; - /* this should not happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; - } + if (bo->mem.mem_type != TTM_PL_VRAM) + return 0; + + size = bo->mem.num_pages << PAGE_SHIFT; + offset = bo->mem.start << PAGE_SHIFT; + if ((offset + size) <= rdev->mc.visible_vram_size) + return 0; + + /* hurrah the memory is not visible ! 
*/ + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); + rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; + r = ttm_bo_validate(bo, &rbo->placement, false, false); + if (unlikely(r == -ENOMEM)) { + radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); + return ttm_bo_validate(bo, &rbo->placement, false, false); + } else if (unlikely(r != 0)) { + return r; } + + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return -EINVAL; + return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f30b8426eee2..53d6e1bb48dc 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -361,6 +361,11 @@ static ssize_t radeon_set_pm_profile(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set profile when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (rdev->pm.pm_method == PM_METHOD_PROFILE) { if (strncmp("default", buf, strlen("default")) == 0) @@ -409,6 +414,13 @@ static ssize_t radeon_set_pm_method(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set method when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + count = -EINVAL; + goto fail; + } + /* we don't support the legacy modes with dpm */ if (rdev->pm.pm_method == PM_METHOD_DPM) { count = -EINVAL; @@ -446,6 +458,10 @@ static ssize_t radeon_get_dpm_state(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); @@ -459,6 +475,11 @@ static ssize_t radeon_set_dpm_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct radeon_device *rdev = ddev->dev_private; + /* Can't set dpm state when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("battery", buf, strlen("battery")) == 0) rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -485,6 +506,10 @@ static ssize_t radeon_get_dpm_forced_performance_level(struct device *dev, struct radeon_device *rdev = ddev->dev_private; enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return snprintf(buf, PAGE_SIZE, "off\n"); + return snprintf(buf, PAGE_SIZE, "%s\n", (level == RADEON_DPM_FORCED_LEVEL_AUTO) ? "auto" : (level == RADEON_DPM_FORCED_LEVEL_LOW) ? 
"low" : "high"); @@ -500,6 +525,11 @@ static ssize_t radeon_set_dpm_forced_performance_level(struct device *dev, enum radeon_dpm_forced_level level; int ret = 0; + /* Can't force performance level when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + mutex_lock(&rdev->pm.mutex); if (strncmp("low", buf, strlen("low")) == 0) { level = RADEON_DPM_FORCED_LEVEL_LOW; @@ -538,8 +568,14 @@ static ssize_t radeon_hwmon_show_temp(struct device *dev, char *buf) { struct radeon_device *rdev = dev_get_drvdata(dev); + struct drm_device *ddev = rdev->ddev; int temp; + /* Can't get temperature when the card is off */ + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) + return -EINVAL; + if (rdev->asic->pm.get_temperature) temp = radeon_get_temperature(rdev); else @@ -1614,8 +1650,12 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct radeon_device *rdev = dev->dev_private; + struct drm_device *ddev = rdev->ddev; - if (rdev->pm.dpm_enabled) { + if ((rdev->flags & RADEON_IS_PX) && + (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) { + seq_printf(m, "PX asic powered off\n"); + } else if (rdev->pm.dpm_enabled) { mutex_lock(&rdev->pm.mutex); if (rdev->asic->dpm.debugfs_print_current_performance_level) radeon_dpm_debugfs_print_current_performance_level(rdev, m); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index f73324c81491..3971d968af6c 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -443,13 +443,16 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring, * @p: parser context * @lo: address of lower dword * @hi: address of higher dword + * @size: size of checker for relocation buffer * * Patch relocation inside command stream with real buffer address */ -int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) +int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi, + unsigned size) { struct radeon_cs_chunk *relocs_chunk; - uint64_t offset; + struct radeon_cs_reloc *reloc; + uint64_t start, end, offset; unsigned idx; relocs_chunk = &p->chunks[p->chunk_relocs_idx]; @@ -462,15 +465,60 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) return -EINVAL; } - offset += p->relocs_ptr[(idx / 4)]->gpu_offset; + reloc = p->relocs_ptr[(idx / 4)]; + start = reloc->gpu_offset; + end = start + radeon_bo_size(reloc->robj); + start += offset; - p->ib.ptr[lo] = offset & 0xFFFFFFFF; - p->ib.ptr[hi] = offset >> 32; + p->ib.ptr[lo] = start & 0xFFFFFFFF; + p->ib.ptr[hi] = start >> 32; + + if (end <= start) { + DRM_ERROR("invalid reloc offset %llX!\n", offset); + return -EINVAL; + } + if ((end - start) < size) { + DRM_ERROR("buffer to small (%d / %d)!\n", + (unsigned)(end - start), size); + return -EINVAL; + } return 0; } /** + * radeon_vce_validate_handle - validate stream handle + * + * @p: parser context + * @handle: handle to validate + * + * Validates the handle and return the found session index or -EINVAL + * we we don't have another free session index. 
+ */ +int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle) +{ + unsigned i; + + /* validate the handle */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (atomic_read(&p->rdev->vce.handles[i]) == handle) + return i; + } + + /* handle not found try to alloc a new one */ + for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { + if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { + p->rdev->vce.filp[i] = p->filp; + p->rdev->vce.img_size[i] = 0; + return i; + } + } + + DRM_ERROR("No more free VCE handles!\n"); + return -EINVAL; +} + +/** * radeon_vce_cs_parse - parse and validate the command stream * * @p: parser context @@ -478,8 +526,10 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi) */ int radeon_vce_cs_parse(struct radeon_cs_parser *p) { - uint32_t handle = 0; - bool destroy = false; + int session_idx = -1; + bool destroyed = false; + uint32_t tmp, handle = 0; + uint32_t *size = &tmp; int i, r; while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { @@ -491,13 +541,29 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (destroyed) { + DRM_ERROR("No other command allowed after destroy!\n"); + return -EINVAL; + } + switch (cmd) { case 0x00000001: // session handle = radeon_get_ib_value(p, p->idx + 2); + session_idx = radeon_vce_validate_handle(p, handle); + if (session_idx < 0) + return session_idx; + size = &p->rdev->vce.img_size[session_idx]; break; case 0x00000002: // task info + break; + case 0x01000001: // create + *size = radeon_get_ib_value(p, p->idx + 8) * + radeon_get_ib_value(p, p->idx + 10) * + 8 * 3 / 2; + break; + case 0x04000001: // config extension case 0x04000002: // pic control case 0x04000005: // rate control @@ -506,23 +572,39 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) break; case 0x03000001: // encode - r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9); + r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9, + *size); if (r) return r; - r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11); + r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11, + *size / 3); if (r) return r; break; case 0x02000001: // destroy - destroy = true; + destroyed = true; break; case 0x05000001: // context buffer + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + *size * 2); + if (r) + return r; + break; + case 0x05000004: // video bitstream buffer + tmp = radeon_get_ib_value(p, p->idx + 4); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + tmp); + if (r) + return r; + break; + case 0x05000005: // feedback buffer - r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2); + r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2, + 4096); if (r) return r; break; @@ -532,33 +614,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p) return -EINVAL; } + if (session_idx == -1) { + DRM_ERROR("no session command at start of IB\n"); + return -EINVAL; + } + p->idx += len / 4; } - if (destroy) { + if (destroyed) { /* IB contains a destroy msg, free the handle */ for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0); - - return 0; - } - - /* create or encode, validate the handle */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->rdev->vce.handles[i]) == handle) - return 0; } - /* handle not found try to alloc a new one */ - for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) { - p->rdev->vce.filp[i] = p->filp; - return 0; - } - } - - DRM_ERROR("No more free VCE handles!\n"); - return -EINVAL; + return 0; } /**
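The new radeon_vce_validate_handle() above is a lock-free slot allocator: atomic_cmpxchg(&handles[i], 0, handle) succeeds only if the slot still reads 0, so two command streams racing for the same free slot cannot both claim it. The same pattern in standalone C11 form, with hypothetical names (the kernel code uses atomic_t and atomic_cmpxchg() instead):

#include <stdatomic.h>
#include <stdint.h>

#define MAX_HANDLES 16

static _Atomic uint32_t handles[MAX_HANDLES];	/* 0 means the slot is free */

/* Return the slot index owning @handle, claiming a free slot if the
 * handle is new, or -1 when the table is full. */
int claim_handle(uint32_t handle)
{
	uint32_t expected;
	int i;

	/* already registered? */
	for (i = 0; i < MAX_HANDLES; ++i)
		if (atomic_load(&handles[i]) == handle)
			return i;

	/* not found: try to claim a free slot; only one racer can swap the 0 */
	for (i = 0; i < MAX_HANDLES; ++i) {
		expected = 0;
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;
	}
	return -1;
}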
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 2aae6ce49d32..d9ab99f47612 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -595,7 +595,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev, ndw = 64; /* assume the worst case */ - ndw += vm->max_pde_used * 12; + ndw += vm->max_pde_used * 16; /* update too big for an IB */ if (ndw > 0xfffff) diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 683532f84931..7321283602ce 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -107,8 +107,8 @@ #define SPLL_CHG_STATUS (1 << 1) #define SPLL_CNTL_MODE 0x618 #define SPLL_SW_DIR_CONTROL (1 << 0) -# define SPLL_REFCLK_SEL(x) ((x) << 8) -# define SPLL_REFCLK_SEL_MASK 0xFF00 +# define SPLL_REFCLK_SEL(x) ((x) << 26) +# define SPLL_REFCLK_SEL_MASK (3 << 26) #define CG_SPLL_SPREAD_SPECTRUM 0x620 #define SSEN (1 << 0) diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index bc196f49ec53..4af0da96c2e2 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1053,7 +1053,7 @@ config SENSORS_PC87427 config SENSORS_NTC_THERMISTOR tristate "NTC thermistor support" - depends on (!OF && !IIO) || (OF && IIO) + depends on !OF || IIO=n || IIO help This driver supports NTC thermistors sensor reading and its interpretation. The driver can also monitor the temperature and diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c index 8a17f01e8672..e76feb86a1d4 100644 --- a/drivers/hwmon/ntc_thermistor.c +++ b/drivers/hwmon/ntc_thermistor.c @@ -44,6 +44,7 @@ struct ntc_compensation { unsigned int ohm; }; +/* Order matters, ntc_match references the entries by index */ static const struct platform_device_id ntc_thermistor_id[] = { { "ncp15wb473", TYPE_NCPXXWB473 }, { "ncp18wb473", TYPE_NCPXXWB473 }, @@ -141,7 +142,7 @@ struct ntc_data { char name[PLATFORM_NAME_SIZE]; }; -#ifdef CONFIG_OF +#if defined(CONFIG_OF) && IS_ENABLED(CONFIG_IIO) static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) { struct iio_channel *channel = pdata->chan; @@ -163,15 +164,15 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata) static const struct of_device_id ntc_match[] = { { .compatible = "ntc,ncp15wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[0] }, { .compatible = "ntc,ncp18wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[1] }, { .compatible = "ntc,ncp21wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[2] }, { .compatible = "ntc,ncp03wb473", - .data = &ntc_thermistor_id[TYPE_NCPXXWB473] }, + .data = &ntc_thermistor_id[3] }, { .compatible = "ntc,ncp15wl333", - .data = &ntc_thermistor_id[TYPE_NCPXXWL333] }, + .data = &ntc_thermistor_id[4] }, { }, }; MODULE_DEVICE_TABLE(of, ntc_match); @@ -223,6 +224,8 @@ ntc_thermistor_parse_dt(struct platform_device *pdev) return NULL; } +#define ntc_match NULL + static void ntc_iio_channel_release(struct ntc_thermistor_platform_data *pdata) { } #endif diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1b6dbe156a37..199c7896f081 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -48,6 +48,7 @@ #include <linux/mlx4/driver.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> #include "mlx4_ib.h" #include "user.h" @@ -1614,6 +1615,53 @@ static int mlx4_ib_inet6_event(struct
notifier_block *this, unsigned long event, } #endif +#define MLX4_IB_INVALID_MAC ((u64)-1) +static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + int port) +{ + u64 new_smac = 0; + u64 release_mac = MLX4_IB_INVALID_MAC; + struct mlx4_ib_qp *qp; + + read_lock(&dev_base_lock); + new_smac = mlx4_mac_to_u64(dev->dev_addr); + read_unlock(&dev_base_lock); + + mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); + qp = ibdev->qp1_proxy[port - 1]; + if (qp) { + int new_smac_index; + u64 old_smac = qp->pri.smac; + struct mlx4_update_qp_params update_params; + + if (new_smac == old_smac) + goto unlock; + + new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac); + + if (new_smac_index < 0) + goto unlock; + + update_params.smac_index = new_smac_index; + if (mlx4_update_qp(ibdev->dev, &qp->mqp, MLX4_UPDATE_QP_SMAC, + &update_params)) { + release_mac = new_smac; + goto unlock; + } + + qp->pri.smac = new_smac; + qp->pri.smac_index = new_smac_index; + + release_mac = old_smac; + } + +unlock: + mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]); + if (release_mac != MLX4_IB_INVALID_MAC) + mlx4_unregister_mac(ibdev->dev, port, release_mac); +} + static void mlx4_ib_get_dev_addr(struct net_device *dev, struct mlx4_ib_dev *ibdev, u8 port) { @@ -1689,9 +1737,13 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev) return 0; } -static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) +static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, + struct net_device *dev, + unsigned long event) + { struct mlx4_ib_iboe *iboe; + int update_qps_port = -1; int port; iboe = &ibdev->iboe; @@ -1719,6 +1771,11 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) } curr_master = iboe->masters[port - 1]; + if (dev == iboe->netdevs[port - 1] && + (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER || + event == NETDEV_UP || event == NETDEV_CHANGE)) + update_qps_port = port; + if (curr_netdev) { port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ? 
IB_PORT_ACTIVE : IB_PORT_DOWN; @@ -1752,6 +1809,9 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev) } spin_unlock(&iboe->lock); + + if (update_qps_port > 0) + mlx4_ib_update_qps(ibdev, dev, update_qps_port); } static int mlx4_ib_netdev_event(struct notifier_block *this, @@ -1764,7 +1824,7 @@ static int mlx4_ib_netdev_event(struct notifier_block *this, return NOTIFY_DONE; ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); - mlx4_ib_scan_netdevs(ibdev); + mlx4_ib_scan_netdevs(ibdev, dev, event); return NOTIFY_DONE; } @@ -2043,6 +2103,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) goto err_map; for (i = 0; i < ibdev->num_ports; ++i) { + mutex_init(&ibdev->qp1_proxy_lock[i]); if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]); @@ -2126,7 +2187,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) for (i = 1 ; i <= ibdev->num_ports ; ++i) reset_gid_table(ibdev, i); rtnl_lock(); - mlx4_ib_scan_netdevs(ibdev); + mlx4_ib_scan_netdevs(ibdev, NULL, 0); rtnl_unlock(); mlx4_ib_init_gid_table(ibdev); } diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index f589522fddfd..66b0b7dbd9f4 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -522,6 +522,9 @@ struct mlx4_ib_dev { int steer_qpn_count; int steer_qpn_base; int steering_support; + struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; + /* lock when destroying qp1_proxy and getting netdev events */ + struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; }; struct ib_event_work { diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 41308af4163c..dc57482ae7af 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1132,6 +1132,12 @@ int mlx4_ib_destroy_qp(struct ib_qp *qp) if (is_qp0(dev, mqp)) mlx4_CLOSE_PORT(dev->dev, mqp->port); + if (dev->qp1_proxy[mqp->port - 1] == mqp) { + mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); + dev->qp1_proxy[mqp->port - 1] = NULL; + mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); + } + pd = get_pd(mqp); destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); @@ -1646,6 +1652,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); if (err) return -EINVAL; + if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) + dev->qp1_proxy[qp->port - 1] = qp; } } } diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 9f69e818b000..93580a47cc54 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -82,7 +82,8 @@ static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) } /* Forward declaration */ -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]); +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], + bool strict_match); static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp); static void rlb_src_unlink(struct bonding *bond, u32 index); static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, @@ -459,7 +460,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[]) bond->alb_info.rlb_promisc_timeout_counter = 0; - alb_send_learning_packets(bond->curr_active_slave, addr); + alb_send_learning_packets(bond->curr_active_slave, addr, true); } /* slave being removed should not be active at this point @@ -995,7 +996,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short 
vlan_id) /*********************** tlb/rlb shared functions *********************/ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], - u16 vid) + __be16 vlan_proto, u16 vid) { struct learning_pkt pkt; struct sk_buff *skb; @@ -1021,7 +1022,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], skb->dev = slave->dev; if (vid) { - skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid); + skb = vlan_put_tag(skb, vlan_proto, vid); if (!skb) { pr_err("%s: Error: failed to insert VLAN tag\n", slave->bond->dev->name); @@ -1032,22 +1033,32 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[], dev_queue_xmit(skb); } - -static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]) +static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[], + bool strict_match) { struct bonding *bond = bond_get_bond_by_slave(slave); struct net_device *upper; struct list_head *iter; /* send untagged */ - alb_send_lp_vid(slave, mac_addr, 0); + alb_send_lp_vid(slave, mac_addr, 0, 0); /* loop through vlans and send one packet for each */ rcu_read_lock(); netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper->priv_flags & IFF_802_1Q_VLAN) - alb_send_lp_vid(slave, mac_addr, - vlan_dev_vlan_id(upper)); + if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) { + if (strict_match && + ether_addr_equal_64bits(mac_addr, + upper->dev_addr)) { + alb_send_lp_vid(slave, mac_addr, + vlan_dev_vlan_proto(upper), + vlan_dev_vlan_id(upper)); + } else if (!strict_match) { + alb_send_lp_vid(slave, upper->dev_addr, + vlan_dev_vlan_proto(upper), + vlan_dev_vlan_id(upper)); + } + } } rcu_read_unlock(); } @@ -1107,7 +1118,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, /* fasten the change in the switch */ if (SLAVE_IS_OK(slave1)) { - alb_send_learning_packets(slave1, slave1->dev->dev_addr); + alb_send_learning_packets(slave1, slave1->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed @@ -1119,7 +1130,7 @@ static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1, } if (SLAVE_IS_OK(slave2)) { - alb_send_learning_packets(slave2, slave2->dev->dev_addr); + alb_send_learning_packets(slave2, slave2->dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform the clients that the mac address * has changed @@ -1490,6 +1501,8 @@ void bond_alb_monitor(struct work_struct *work) /* send learning packets */ if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) { + bool strict_match; + /* change of curr_active_slave involves swapping of mac addresses. * in order to avoid this swapping from happening while * sending the learning packets, the curr_slave_lock must be held for * read. */ read_lock(&bond->curr_slave_lock); - bond_for_each_slave_rcu(bond, slave, iter) - alb_send_learning_packets(slave, slave->dev->dev_addr); + bond_for_each_slave_rcu(bond, slave, iter) { + /* If updating current_active, use all currently + * used mac addresses (!strict_match). Otherwise, only + * use mac of the slave device.
+ */ + strict_match = (slave != bond->curr_active_slave); + alb_send_learning_packets(slave, slave->dev->dev_addr, + strict_match); + } read_unlock(&bond->curr_slave_lock); @@ -1721,7 +1741,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave } else { /* set the new_slave to the bond mac address */ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr); - alb_send_learning_packets(new_slave, bond->dev->dev_addr); + alb_send_learning_packets(new_slave, bond->dev->dev_addr, + false); } write_lock_bh(&bond->curr_slave_lock); @@ -1764,7 +1785,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr) alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr); read_lock(&bond->lock); - alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr); + alb_send_learning_packets(bond->curr_active_slave, + bond_dev->dev_addr, false); if (bond->alb_info.rlb_enabled) { /* inform clients mac address has changed */ rlb_req_update_slave_clients(bond, bond->curr_active_slave); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69aff72c8957..d3a67896d435 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2126,10 +2126,10 @@ static bool bond_has_this_ip(struct bonding *bond, __be32 ip) */ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, - struct bond_vlan_tag *inner, - struct bond_vlan_tag *outer) + struct bond_vlan_tag *tags) { struct sk_buff *skb; + int i; pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n", arp_op, slave_dev->name, &dest_ip, &src_ip); @@ -2141,21 +2141,26 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, net_err_ratelimited("ARP packet allocation failed\n"); return; } - if (outer->vlan_id) { - if (inner->vlan_id) { - pr_debug("inner tag: proto %X vid %X\n", - ntohs(inner->vlan_proto), inner->vlan_id); - skb = __vlan_put_tag(skb, inner->vlan_proto, - inner->vlan_id); - if (!skb) { - net_err_ratelimited("failed to insert inner VLAN tag\n"); - return; - } - } - pr_debug("outer reg: proto %X vid %X\n", - ntohs(outer->vlan_proto), outer->vlan_id); - skb = vlan_put_tag(skb, outer->vlan_proto, outer->vlan_id); + /* Go through all the tags backwards and add them to the packet */ + for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) { + if (!tags[i].vlan_id) + continue; + + pr_debug("inner tag: proto %X vid %X\n", + ntohs(tags[i].vlan_proto), tags[i].vlan_id); + skb = __vlan_put_tag(skb, tags[i].vlan_proto, + tags[i].vlan_id); + if (!skb) { + net_err_ratelimited("failed to insert inner VLAN tag\n"); + return; + } + } + /* Set the outer tag */ + if (tags[0].vlan_id) { + pr_debug("outer tag: proto %X vid %X\n", + ntohs(tags[0].vlan_proto), tags[0].vlan_id); + skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id); if (!skb) { net_err_ratelimited("failed to insert outer VLAN tag\n"); return; @@ -2164,22 +2169,52 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, arp_xmit(skb); } +/* Validate the device path between the @start_dev and the @end_dev. + * The path is valid if the @end_dev is reachable through device + * stacking. + * When the path is validated, collect any vlan information in the + * path. 
+ */ +static bool bond_verify_device_path(struct net_device *start_dev, + struct net_device *end_dev, + struct bond_vlan_tag *tags) +{ + struct net_device *upper; + struct list_head *iter; + int idx; + + if (start_dev == end_dev) + return true; + + netdev_for_each_upper_dev_rcu(start_dev, upper, iter) { + if (bond_verify_device_path(upper, end_dev, tags)) { + if (is_vlan_dev(upper)) { + idx = vlan_get_encap_level(upper); + if (idx >= BOND_MAX_VLAN_ENCAP) + return false; + + tags[idx].vlan_proto = + vlan_dev_vlan_proto(upper); + tags[idx].vlan_id = vlan_dev_vlan_id(upper); + } + return true; + } + } + + return false; +} static void bond_arp_send_all(struct bonding *bond, struct slave *slave) { - struct net_device *upper, *vlan_upper; - struct list_head *iter, *vlan_iter; struct rtable *rt; - struct bond_vlan_tag inner, outer; + struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP]; __be32 *targets = bond->params.arp_targets, addr; int i; + bool ret; for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) { pr_debug("basa: target %pI4\n", &targets[i]); - inner.vlan_proto = 0; - inner.vlan_id = 0; - outer.vlan_proto = 0; - outer.vlan_id = 0; + memset(tags, 0, sizeof(tags)); /* Find out through which dev should the packet go */ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, @@ -2192,7 +2227,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n", bond->dev->name, &targets[i]); - bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, &inner, &outer); + bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], + 0, tags); continue; } @@ -2201,52 +2237,12 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) goto found; rcu_read_lock(); - /* first we search only for vlan devices. for every vlan - * found we verify its upper dev list, searching for the - * rt->dst.dev. If found we save the tag of the vlan and - * proceed to send the packet. - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, vlan_upper, - vlan_iter) { - if (!is_vlan_dev(vlan_upper)) - continue; - - if (vlan_upper == rt->dst.dev) { - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - netdev_for_each_all_upper_dev_rcu(vlan_upper, upper, - iter) { - if (upper == rt->dst.dev) { - /* If the upper dev is a vlan dev too, - * set the vlan tag to inner tag. 
- */ - if (is_vlan_dev(upper)) { - inner.vlan_proto = vlan_dev_vlan_proto(upper); - inner.vlan_id = vlan_dev_vlan_id(upper); - } - outer.vlan_proto = vlan_dev_vlan_proto(vlan_upper); - outer.vlan_id = vlan_dev_vlan_id(vlan_upper); - rcu_read_unlock(); - goto found; - } - } - } - - /* if the device we're looking for is not on top of any of - * our upper vlans, then just search for any dev that - * matches, and in case it's a vlan - save the id - */ - netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) { - if (upper == rt->dst.dev) { - rcu_read_unlock(); - goto found; - } - } + ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags); rcu_read_unlock(); + if (ret) + goto found; + /* Not our device - skip */ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n", bond->dev->name, &targets[i], @@ -2259,7 +2255,7 @@ found: addr = bond_confirm_addr(rt->dst.dev, targets[i], 0); ip_rt_put(rt); bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], - addr, &inner, &outer); + addr, tags); } } diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 724e30fa20b9..832070298446 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -125,6 +125,7 @@ static const struct bond_opt_value bond_fail_over_mac_tbl[] = { static const struct bond_opt_value bond_intmax_tbl[] = { { "off", 0, BOND_VALFLAG_DEFAULT}, { "maxval", INT_MAX, BOND_VALFLAG_MAX}, + { NULL, -1, 0} }; static const struct bond_opt_value bond_lacp_rate_tbl[] = { diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h index b8bdd0acc8f3..00bea320e3b5 100644 --- a/drivers/net/bonding/bonding.h +++ b/drivers/net/bonding/bonding.h @@ -36,6 +36,7 @@ #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n" +#define BOND_MAX_VLAN_ENCAP 2 #define BOND_MAX_ARP_TARGETS 16 #define BOND_DEFAULT_MIIMON 100 diff --git a/drivers/net/can/c_can/Kconfig b/drivers/net/can/c_can/Kconfig index 8ab7103d4f44..61ffc12d8fd8 100644 --- a/drivers/net/can/c_can/Kconfig +++ b/drivers/net/can/c_can/Kconfig @@ -14,13 +14,6 @@ config CAN_C_CAN_PLATFORM SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) boards like am335x, dm814x, dm813x and dm811x. -config CAN_C_CAN_STRICT_FRAME_ORDERING - bool "Force a strict RX CAN frame order (may cause frame loss)" - ---help--- - The RX split buffer prevents packet reordering but can cause packet - loss. Only enable this option when you accept to lose CAN frames - in favour of getting the received CAN frames in the correct order. 
- config CAN_C_CAN_PCI tristate "Generic PCI Bus based C_CAN/D_CAN driver" depends on PCI diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c index a2ca820b5373..95e04e2002da 100644 --- a/drivers/net/can/c_can/c_can.c +++ b/drivers/net/can/c_can/c_can.c @@ -732,26 +732,12 @@ static u32 c_can_adjust_pending(u32 pend) static inline void c_can_rx_object_get(struct net_device *dev, struct c_can_priv *priv, u32 obj) { -#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING - if (obj < C_CAN_MSG_RX_LOW_LAST) - c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW); - else -#endif c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high); } static inline void c_can_rx_finalize(struct net_device *dev, struct c_can_priv *priv, u32 obj) { -#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING - if (obj < C_CAN_MSG_RX_LOW_LAST) - priv->rxmasked |= BIT(obj - 1); - else if (obj == C_CAN_MSG_RX_LOW_LAST) { - priv->rxmasked = 0; - /* activate all lower message objects */ - c_can_activate_all_lower_rx_msg_obj(dev, IF_RX); - } -#endif if (priv->type != BOSCH_D_CAN) c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT); } @@ -799,9 +785,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv) { u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); -#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING - pend &= ~priv->rxmasked; -#endif return pend; } @@ -814,25 +797,6 @@ static inline u32 c_can_get_pending(struct c_can_priv *priv) * has arrived. To work-around this issue, we keep two groups of message * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT. * - * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y - * - * To ensure in-order frame reception we use the following - * approach while re-activating a message object to receive further - * frames: - * - if the current message object number is lower than - * C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing - * the INTPND bit. - * - if the current message object number is equal to - * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower - * receive message objects. - * - if the current message object number is greater than - * C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of - * only this message object. - * - * This can cause packet loss! - * - * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n - * * We clear the newdat bit right away. * * This can result in packet reordering when the readout is slow. 
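With the strict-ordering option gone, the driver is left with the single RX strategy the remaining comment describes: clear NEWDAT as soon as each message object is read. A rough standalone sketch of that readout loop, with hypothetical register helpers (the real driver works through c_can_object_get() and related accessors):

#include <stdint.h>

/* hypothetical hardware accessors, assumed for this sketch */
extern uint32_t read_newdat_pending(void);	/* one bit per message object */
extern void read_object(unsigned int obj);	/* copy the frame out of the mailbox */
extern void clear_newdat(unsigned int obj);	/* re-arm the object for reception */

/* Drain pending RX objects, lowest object number first. Because each
 * object is re-armed immediately, a frame landing in a lower-numbered
 * object while higher ones are drained can be delivered out of order --
 * exactly the reordering the removed Kconfig option traded frame loss
 * to avoid. */
void rx_poll(void)
{
	uint32_t pend = read_newdat_pending();

	while (pend) {
		unsigned int obj = (unsigned int)__builtin_ctz(pend) + 1;

		read_object(obj);
		clear_newdat(obj);
		pend &= pend - 1;	/* clear the lowest set bit */
	}
}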
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c index c540e3d12e3d..564933ae218c 100644 --- a/drivers/net/can/sja1000/peak_pci.c +++ b/drivers/net/can/sja1000/peak_pci.c @@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; - struct net_device *dev; + struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; @@ -688,11 +688,13 @@ failure_remove_channels: writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; - for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) { - unregister_sja1000dev(dev); - free_sja1000dev(dev); + for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; + prev_dev = chan->prev_dev; + + unregister_sja1000dev(dev); + free_sja1000dev(dev); } /* free any PCIeC resources too */ @@ -726,10 +728,12 @@ static void peak_pci_remove(struct pci_dev *pdev) /* Loop over all registered devices */ while (1) { + struct net_device *prev_dev = chan->prev_dev; + dev_info(&pdev->dev, "removing device %s\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); - dev = chan->prev_dev; + dev = prev_dev; if (!dev) { /* do that only for first channel */ diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 39b26fe28d10..d7401017a3f1 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -35,6 +35,18 @@ source "drivers/net/ethernet/calxeda/Kconfig" source "drivers/net/ethernet/chelsio/Kconfig" source "drivers/net/ethernet/cirrus/Kconfig" source "drivers/net/ethernet/cisco/Kconfig" + +config CX_ECAT + tristate "Beckhoff CX5020 EtherCAT master support" + depends on PCI + ---help--- + Driver for EtherCAT master module located on CCAT FPGA + that can be found on Beckhoff CX5020, and possibly other + Beckhoff CX series industrial PCs. + + To compile this driver as a module, choose M here. The module + will be called ec_bhf.
+ source "drivers/net/ethernet/davicom/Kconfig" config DNET diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 545d0b3b9cb4..35190e36c456 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/ obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/ obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/ obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/ +obj-$(CONFIG_CX_ECAT) += ec_bhf.o obj-$(CONFIG_DM9000) += davicom/ obj-$(CONFIG_DNET) += dnet.o obj-$(CONFIG_NET_VENDOR_DEC) += dec/ diff --git a/drivers/net/ethernet/altera/Makefile b/drivers/net/ethernet/altera/Makefile index d4a187e45369..3eff2fd3997e 100644 --- a/drivers/net/ethernet/altera/Makefile +++ b/drivers/net/ethernet/altera/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_ALTERA_TSE) += altera_tse.o altera_tse-objs := altera_tse_main.o altera_tse_ethtool.o \ altera_msgdma.o altera_sgdma.o altera_utils.o +ccflags-y += -D__CHECK_ENDIAN__ diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 4d1f2fdd5c32..0fb986ba3290 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c @@ -37,18 +37,16 @@ void msgdma_start_rxdma(struct altera_tse_private *priv) void msgdma_reset(struct altera_tse_private *priv) { int counter; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; /* Reset Rx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &rxcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, + msgdma_csroffs(status)); + csrwr32(MSGDMA_CSR_CTL_RESET, priv->rx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&rxcsr->status, + if (tse_bit_is_clear(priv->rx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -59,15 +57,18 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Rx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &rxcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->rx_dma_csr, msgdma_csroffs(status)); /* Reset Tx mSGDMA */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); - iowrite32(MSGDMA_CSR_CTL_RESET, &txcsr->control); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, + msgdma_csroffs(status)); + + csrwr32(MSGDMA_CSR_CTL_RESET, priv->tx_dma_csr, + msgdma_csroffs(control)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(&txcsr->status, + if (tse_bit_is_clear(priv->tx_dma_csr, msgdma_csroffs(status), MSGDMA_CSR_STAT_RESETTING)) break; udelay(1); @@ -78,58 +79,58 @@ void msgdma_reset(struct altera_tse_private *priv) "TSE Tx mSGDMA resetting bit never cleared!\n"); /* clear all status bits */ - iowrite32(MSGDMA_CSR_STAT_MASK, &txcsr->status); + csrwr32(MSGDMA_CSR_STAT_MASK, priv->tx_dma_csr, msgdma_csroffs(status)); } void msgdma_disable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_clear_bit(priv->rx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_enable_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_set_bit(priv->rx_dma_csr, msgdma_csroffs(control), + 
MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_disable_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - tse_clear_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_clear_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_enable_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - tse_set_bit(&csr->control, MSGDMA_CSR_CTL_GLOBAL_INTR); + tse_set_bit(priv->tx_dma_csr, msgdma_csroffs(control), + MSGDMA_CSR_CTL_GLOBAL_INTR); } void msgdma_clear_rxirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->rx_dma_csr; - iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->rx_dma_csr, msgdma_csroffs(status)); } void msgdma_clear_txirq(struct altera_tse_private *priv) { - struct msgdma_csr *csr = priv->tx_dma_csr; - iowrite32(MSGDMA_CSR_STAT_IRQ, &csr->status); + csrwr32(MSGDMA_CSR_STAT_IRQ, priv->tx_dma_csr, msgdma_csroffs(status)); } /* return 0 to indicate transmit is pending */ int msgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - struct msgdma_extended_desc *desc = priv->tx_dma_desc; - - iowrite32(lower_32_bits(buffer->dma_addr), &desc->read_addr_lo); - iowrite32(upper_32_bits(buffer->dma_addr), &desc->read_addr_hi); - iowrite32(0, &desc->write_addr_lo); - iowrite32(0, &desc->write_addr_hi); - iowrite32(buffer->len, &desc->len); - iowrite32(0, &desc->burst_seq_num); - iowrite32(MSGDMA_DESC_TX_STRIDE, &desc->stride); - iowrite32(MSGDMA_DESC_CTL_TX_SINGLE, &desc->control); + csrwr32(lower_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_lo)); + csrwr32(upper_32_bits(buffer->dma_addr), priv->tx_dma_desc, + msgdma_descroffs(read_addr_hi)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_lo)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(write_addr_hi)); + csrwr32(buffer->len, priv->tx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->tx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(MSGDMA_DESC_TX_STRIDE, priv->tx_dma_desc, + msgdma_descroffs(stride)); + csrwr32(MSGDMA_DESC_CTL_TX_SINGLE, priv->tx_dma_desc, + msgdma_descroffs(control)); return 0; } @@ -138,17 +139,16 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) u32 ready = 0; u32 inuse; u32 status; - struct msgdma_csr *txcsr = - (struct msgdma_csr *)priv->tx_dma_csr; /* Get number of sent descriptors */ - inuse = ioread32(&txcsr->rw_fill_level) & 0xffff; + inuse = csrrd32(priv->tx_dma_csr, msgdma_csroffs(rw_fill_level)) + & 0xffff; if (inuse) { /* Tx FIFO is not empty */ ready = priv->tx_prod - priv->tx_cons - inuse - 1; } else { /* Check for buffered last packet */ - status = ioread32(&txcsr->status); + status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); if (status & MSGDMA_CSR_STAT_BUSY) ready = priv->tx_prod - priv->tx_cons - 1; else @@ -162,7 +162,6 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) void msgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *rxbuffer) { - struct msgdma_extended_desc *desc = priv->rx_dma_desc; u32 len = priv->rx_dma_buf_sz; dma_addr_t dma_addr = rxbuffer->dma_addr; u32 control = (MSGDMA_DESC_CTL_END_ON_EOP @@ -172,14 +171,16 @@ void msgdma_add_rx_desc(struct altera_tse_private *priv, | MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO); - iowrite32(0, &desc->read_addr_lo); - iowrite32(0, &desc->read_addr_hi); - iowrite32(lower_32_bits(dma_addr), &desc->write_addr_lo); - iowrite32(upper_32_bits(dma_addr), 
&desc->write_addr_hi); - iowrite32(len, &desc->len); - iowrite32(0, &desc->burst_seq_num); - iowrite32(0x00010001, &desc->stride); - iowrite32(control, &desc->control); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_lo)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(read_addr_hi)); + csrwr32(lower_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_lo)); + csrwr32(upper_32_bits(dma_addr), priv->rx_dma_desc, + msgdma_descroffs(write_addr_hi)); + csrwr32(len, priv->rx_dma_desc, msgdma_descroffs(len)); + csrwr32(0, priv->rx_dma_desc, msgdma_descroffs(burst_seq_num)); + csrwr32(0x00010001, priv->rx_dma_desc, msgdma_descroffs(stride)); + csrwr32(control, priv->rx_dma_desc, msgdma_descroffs(control)); } /* status is returned on upper 16 bits, @@ -190,14 +191,13 @@ u32 msgdma_rx_status(struct altera_tse_private *priv) u32 rxstatus = 0; u32 pktlength; u32 pktstatus; - struct msgdma_csr *rxcsr = - (struct msgdma_csr *)priv->rx_dma_csr; - struct msgdma_response *rxresp = - (struct msgdma_response *)priv->rx_dma_resp; - - if (ioread32(&rxcsr->resp_fill_level) & 0xffff) { - pktlength = ioread32(&rxresp->bytes_transferred); - pktstatus = ioread32(&rxresp->status); + + if (csrrd32(priv->rx_dma_csr, msgdma_csroffs(resp_fill_level)) + & 0xffff) { + pktlength = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(bytes_transferred)); + pktstatus = csrrd32(priv->rx_dma_resp, + msgdma_respoffs(status)); rxstatus = pktstatus; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff); diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h index d7b59ba4019c..e335626e1b6b 100644 --- a/drivers/net/ethernet/altera/altera_msgdmahw.h +++ b/drivers/net/ethernet/altera/altera_msgdmahw.h @@ -17,15 +17,6 @@ #ifndef __ALTERA_MSGDMAHW_H__ #define __ALTERA_MSGDMAHW_H__ -/* mSGDMA standard descriptor format - */ -struct msgdma_desc { - u32 read_addr; /* data buffer source address */ - u32 write_addr; /* data buffer destination address */ - u32 len; /* the number of bytes to transfer per descriptor */ - u32 control; /* characteristics of the transfer */ -}; - /* mSGDMA extended descriptor format */ struct msgdma_extended_desc { @@ -159,6 +150,10 @@ struct msgdma_response { u32 status; }; +#define msgdma_respoffs(a) (offsetof(struct msgdma_response, a)) +#define msgdma_csroffs(a) (offsetof(struct msgdma_csr, a)) +#define msgdma_descroffs(a) (offsetof(struct msgdma_extended_desc, a)) + /* mSGDMA response register bit definitions */ #define MSGDMA_RESP_EARLY_TERM BIT(8) diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c index 9ce8630692b6..99cc56f451cf 100644 --- a/drivers/net/ethernet/altera/altera_sgdma.c +++ b/drivers/net/ethernet/altera/altera_sgdma.c @@ -20,8 +20,8 @@ #include "altera_sgdmahw.h" #include "altera_sgdma.h" -static void sgdma_setup_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, dma_addr_t ndesc_phys, dma_addr_t raddr, dma_addr_t waddr, @@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, int wfixed); static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static int sgdma_async_read(struct altera_tse_private *priv); static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); 
static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc); + struct sgdma_descrip __iomem *desc); static int sgdma_txbusy(struct altera_tse_private *priv); @@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv) priv->rxdescphys = (dma_addr_t) 0; priv->txdescphys = (dma_addr_t) 0; - priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc, + priv->rxdescphys = dma_map_single(priv->device, + (void __force *)priv->rx_dma_desc, priv->rxdescmem, DMA_BIDIRECTIONAL); if (dma_mapping_error(priv->device, priv->rxdescphys)) { @@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv) return -EINVAL; } - priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc, + priv->txdescphys = dma_map_single(priv->device, + (void __force *)priv->tx_dma_desc, priv->txdescmem, DMA_TO_DEVICE); if (dma_mapping_error(priv->device, priv->txdescphys)) { @@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv) } /* Initialize descriptor memory to all 0's, sync memory to cache */ - memset(priv->tx_dma_desc, 0, priv->txdescmem); - memset(priv->rx_dma_desc, 0, priv->rxdescmem); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); dma_sync_single_for_device(priv->device, priv->txdescphys, priv->txdescmem, DMA_TO_DEVICE); @@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv) */ void sgdma_reset(struct altera_tse_private *priv) { - u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc; - u32 txdescriplen = priv->txdescmem; - u32 *prxdescripmem = (u32 *)priv->rx_dma_desc; - u32 rxdescriplen = priv->rxdescmem; - struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr; - struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr; - /* Initialize descriptor memory to 0 */ - memset(ptxdescripmem, 0, txdescriplen); - memset(prxdescripmem, 0, rxdescriplen); + memset_io(priv->tx_dma_desc, 0, priv->txdescmem); + memset_io(priv->rx_dma_desc, 0, priv->rxdescmem); - iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control); - iowrite32(0, &ptxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); - iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control); - iowrite32(0, &prxsgdma->control); + csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control)); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); } /* For SGDMA, interrupts remain enabled after initially enabling, @@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv) void sgdma_clear_rxirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); } void sgdma_clear_txirq(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT); + tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control), + SGDMA_CTRLREG_CLRINT); } /* transmits buffer through SGDMA. 
Returns number of buffers @@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv) */ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) { - int pktstx = 0; - struct sgdma_descrip *descbase = - (struct sgdma_descrip *)priv->tx_dma_desc; + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->tx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; /* wait 'til the tx sgdma is ready for the next transmit request */ if (sgdma_txbusy(priv)) @@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) 0, /* read fixed */ SGDMA_CONTROL_WR_FIXED); /* Generate SOP */ - pktstx = sgdma_async_write(priv, cdesc); + sgdma_async_write(priv, cdesc); /* enqueue the request to the pending transmit queue */ queue_tx(priv, buffer); @@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer) u32 sgdma_tx_completions(struct altera_tse_private *priv) { u32 ready = 0; - struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc; if (!sgdma_txbusy(priv) && - ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) && + ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control)) + & SGDMA_CONTROL_HW_OWNED) == 0) && (dequeue_tx(priv))) { ready = 1; } @@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv, */ u32 sgdma_rx_status(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc; - struct sgdma_descrip *desc = NULL; - int pktsrx; - unsigned int rxstatus = 0; - unsigned int pktlength = 0; - unsigned int pktstatus = 0; + struct sgdma_descrip __iomem *base = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *desc = NULL; struct tse_buffer *rxbuffer = NULL; + unsigned int rxstatus = 0; - u32 sts = ioread32(&csr->status); + u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)); desc = &base[0]; if (sts & SGDMA_STSREG_EOP) { + unsigned int pktlength = 0; + unsigned int pktstatus = 0; dma_sync_single_for_cpu(priv->device, priv->rxdescphys, priv->sgdmadesclen, DMA_FROM_DEVICE); - pktlength = desc->bytes_xferred; - pktstatus = desc->status & 0x3f; - rxstatus = pktstatus; + pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred)); + pktstatus = csrrd8(desc, sgdma_descroffs(status)); + rxstatus = pktstatus & ~SGDMA_STATUS_EOP; rxstatus = rxstatus << 16; rxstatus |= (pktlength & 0xffff); if (rxstatus) { - desc->status = 0; + csrwr8(0, desc, sgdma_descroffs(status)); rxbuffer = dequeue_rx(priv); if (rxbuffer == NULL) @@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) "sgdma rx and rx queue empty!\n"); /* Clear control */ - iowrite32(0, &csr->control); + csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); /* clear status */ - iowrite32(0xf, &csr->status); + csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status)); /* kick the rx sgdma after reaping this descriptor */ - pktsrx = sgdma_async_read(priv); + sgdma_async_read(priv); } else { /* If the SGDMA indicated an end of packet on recv, @@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) */ netdev_err(priv->dev, "SGDMA RX Error Info: %x, %x, %x\n", - sts, desc->status, rxstatus); + sts, csrrd8(desc, sgdma_descroffs(status)), + rxstatus); } } else if 
(sts == 0) { - pktsrx = sgdma_async_read(priv); + sgdma_async_read(priv); } return rxstatus; @@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv) /* Private functions */ -static void sgdma_setup_descrip(struct sgdma_descrip *desc, - struct sgdma_descrip *ndesc, +static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, + struct sgdma_descrip __iomem *ndesc, dma_addr_t ndesc_phys, dma_addr_t raddr, dma_addr_t waddr, @@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, int wfixed) { /* Clear the next descriptor as not owned by hardware */ - u32 ctrl = ndesc->control; + + u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control)); ctrl &= ~SGDMA_CONTROL_HW_OWNED; - ndesc->control = ctrl; + csrwr8(ctrl, ndesc, sgdma_descroffs(control)); - ctrl = 0; ctrl = SGDMA_CONTROL_HW_OWNED; ctrl |= generate_eop; ctrl |= rfixed; ctrl |= wfixed; /* Channel is implicitly zero, initialized to 0 by default */ - - desc->raddr = raddr; - desc->waddr = waddr; - desc->next = lower_32_bits(ndesc_phys); - desc->control = ctrl; - desc->status = 0; - desc->rburst = 0; - desc->wburst = 0; - desc->bytes = length; - desc->bytes_xferred = 0; + csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr)); + csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr)); + + csrwr32(0, desc, sgdma_descroffs(pad1)); + csrwr32(0, desc, sgdma_descroffs(pad2)); + csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next)); + + csrwr8(ctrl, desc, sgdma_descroffs(control)); + csrwr8(0, desc, sgdma_descroffs(status)); + csrwr8(0, desc, sgdma_descroffs(wburst)); + csrwr8(0, desc, sgdma_descroffs(rburst)); + csrwr16(length, desc, sgdma_descroffs(bytes)); + csrwr16(0, desc, sgdma_descroffs(bytes_xferred)); } /* If hardware is busy, don't restart async read. 
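A note on the descriptor writes above: each csrwr8()/csrwr16()/csrwr32() call is sized to the field it touches, so neighbouring fields of the __packed sgdma_descrip are never rewritten as a side effect of a wider store. A cut-down sketch of why the width matters, with a hypothetical example_desc mirroring the mixed field widths:

#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct example_desc {
	u32 raddr;	/* offset 0 */
	u16 bytes;	/* offset 4 */
	u8  rburst;	/* offset 6 */
	u8  wburst;	/* offset 7 */
} __packed;

static void example_set_len(void __iomem *desc, u16 len)
{
	/* a 16-bit store touches only 'bytes'; a 32-bit store at
	 * offset 4 would also rewrite rburst and wburst
	 */
	writew(len, (void __iomem *)((uintptr_t)desc +
		    offsetof(struct example_desc, bytes)));
}

The (void __force *) casts feeding dma_map_single() in sgdma_initialize() are the complementary half of the same cleanup: the descriptor RAM is annotated __iomem, but the DMA API takes a plain kernel pointer, and __force tells sparse the address-space crossing is intentional; memset_io() replaces memset() on that memory for the same reason.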
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc, */ static int sgdma_async_read(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - struct sgdma_descrip *descbase = - (struct sgdma_descrip *)priv->rx_dma_desc; + struct sgdma_descrip __iomem *descbase = + (struct sgdma_descrip __iomem *)priv->rx_dma_desc; - struct sgdma_descrip *cdesc = &descbase[0]; - struct sgdma_descrip *ndesc = &descbase[1]; + struct sgdma_descrip __iomem *cdesc = &descbase[0]; + struct sgdma_descrip __iomem *ndesc = &descbase[1]; struct tse_buffer *rxbuffer = NULL; @@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv) priv->sgdmadesclen, DMA_TO_DEVICE); - iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)), + priv->rx_dma_csr, + sgdma_csroffs(next_descrip)); - iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START), + priv->rx_dma_csr, + sgdma_csroffs(control)); return 1; } @@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv) } static int sgdma_async_write(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; - if (sgdma_txbusy(priv)) return 0; /* clear control and status */ - iowrite32(0, &csr->control); - iowrite32(0x1f, &csr->status); + csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control)); + csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status)); dma_sync_single_for_device(priv->device, priv->txdescphys, priv->sgdmadesclen, DMA_TO_DEVICE); - iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)), - &csr->next_descrip); + csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)), + priv->tx_dma_csr, + sgdma_csroffs(next_descrip)); - iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START), - &csr->control); + csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START), + priv->tx_dma_csr, + sgdma_csroffs(control)); return 1; } static dma_addr_t sgdma_txphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->txdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc; @@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv, static dma_addr_t sgdma_rxphysaddr(struct altera_tse_private *priv, - struct sgdma_descrip *desc) + struct sgdma_descrip __iomem *desc) { dma_addr_t paddr = priv->rxdescmem_busaddr; uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc; @@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv) */ static int sgdma_rxbusy(struct altera_tse_private *priv) { - struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr; - return ioread32(&csr->status) & SGDMA_STSREG_BUSY; + return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY; } /* waits for the tx sgdma to finish its current operation, returns 0 @@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv) static int sgdma_txbusy(struct altera_tse_private *priv) { int delay = 0; - struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr; /* if DMA is busy, wait for current transaction to finish */ - while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100)) + while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) && (delay++ < 100)) udelay(1); - if
(ioread32(&csr->status) & SGDMA_STSREG_BUSY) { + if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status)) + & SGDMA_STSREG_BUSY) { netdev_err(priv->dev, "timeout waiting for tx dma\n"); return 1; } diff --git a/drivers/net/ethernet/altera/altera_sgdmahw.h b/drivers/net/ethernet/altera/altera_sgdmahw.h index ba3334f35383..85bc33b218d9 100644 --- a/drivers/net/ethernet/altera/altera_sgdmahw.h +++ b/drivers/net/ethernet/altera/altera_sgdmahw.h @@ -19,16 +19,16 @@ /* SGDMA descriptor structure */ struct sgdma_descrip { - unsigned int raddr; /* address of data to be read */ - unsigned int pad1; - unsigned int waddr; - unsigned int pad2; - unsigned int next; - unsigned int pad3; - unsigned short bytes; - unsigned char rburst; - unsigned char wburst; - unsigned short bytes_xferred; /* 16 bits, bytes xferred */ + u32 raddr; /* address of data to be read */ + u32 pad1; + u32 waddr; + u32 pad2; + u32 next; + u32 pad3; + u16 bytes; + u8 rburst; + u8 wburst; + u16 bytes_xferred; /* 16 bits, bytes xferred */ /* bit 0: error * bit 1: length error @@ -39,7 +39,7 @@ struct sgdma_descrip { * bit 6: reserved * bit 7: status eop for recv case */ - unsigned char status; + u8 status; /* bit 0: eop * bit 1: read_fixed @@ -47,7 +47,7 @@ struct sgdma_descrip { * bits 3,4,5,6: Channel (always 0) * bit 7: hardware owned */ - unsigned char control; + u8 control; } __packed; @@ -101,6 +101,8 @@ struct sgdma_csr { u32 pad3[3]; }; +#define sgdma_csroffs(a) (offsetof(struct sgdma_csr, a)) +#define sgdma_descroffs(a) (offsetof(struct sgdma_descrip, a)) #define SGDMA_STSREG_ERR BIT(0) /* Error */ #define SGDMA_STSREG_EOP BIT(1) /* EOP */ diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h index 465c4aabebbd..2adb24d4523c 100644 --- a/drivers/net/ethernet/altera/altera_tse.h +++ b/drivers/net/ethernet/altera/altera_tse.h @@ -357,6 +357,8 @@ struct altera_tse_mac { u32 reserved5[42]; }; +#define tse_csroffs(a) (offsetof(struct altera_tse_mac, a)) + /* Transmit and Receive Command Registers Bit Definitions */ #define ALTERA_TSE_TX_CMD_STAT_OMIT_CRC BIT(17) @@ -487,4 +489,49 @@ struct altera_tse_private { */ void altera_tse_set_ethtool_ops(struct net_device *); +static inline +u32 csrrd32(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readl(paddr); +} + +static inline +u16 csrrd16(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readw(paddr); +} + +static inline +u8 csrrd8(void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + return readb(paddr); +} + +static inline +void csrwr32(u32 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writel(val, paddr); +} + +static inline +void csrwr16(u16 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writew(val, paddr); +} + +static inline +void csrwr8(u8 val, void __iomem *mac, size_t offs) +{ + void __iomem *paddr = (void __iomem *)((uintptr_t)mac + offs); + + writeb(val, paddr); +} + #endif /* __ALTERA_TSE_H__ */ diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c index 76133caffa78..54c25eff7952 100644 --- a/drivers/net/ethernet/altera/altera_tse_ethtool.c +++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c @@ -96,54 +96,89 @@ static void tse_fill_stats(struct net_device *dev, struct ethtool_stats 
*dummy, u64 *buf) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; u64 ext; - buf[0] = ioread32(&mac->frames_transmitted_ok); - buf[1] = ioread32(&mac->frames_received_ok); - buf[2] = ioread32(&mac->frames_check_sequence_errors); - buf[3] = ioread32(&mac->alignment_errors); + buf[0] = csrrd32(priv->mac_dev, + tse_csroffs(frames_transmitted_ok)); + buf[1] = csrrd32(priv->mac_dev, + tse_csroffs(frames_received_ok)); + buf[2] = csrrd32(priv->mac_dev, + tse_csroffs(frames_check_sequence_errors)); + buf[3] = csrrd32(priv->mac_dev, + tse_csroffs(alignment_errors)); /* Extended aOctetsTransmittedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_transmitted_ok) << 32; - ext |= ioread32(&mac->octets_transmitted_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_transmitted_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_transmitted_ok)); buf[4] = ext; /* Extended aOctetsReceivedOK counter */ - ext = (u64) ioread32(&mac->msb_octets_received_ok) << 32; - ext |= ioread32(&mac->octets_received_ok); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_octets_received_ok)) << 32; + + ext |= csrrd32(priv->mac_dev, + tse_csroffs(octets_received_ok)); buf[5] = ext; - buf[6] = ioread32(&mac->tx_pause_mac_ctrl_frames); - buf[7] = ioread32(&mac->rx_pause_mac_ctrl_frames); - buf[8] = ioread32(&mac->if_in_errors); - buf[9] = ioread32(&mac->if_out_errors); - buf[10] = ioread32(&mac->if_in_ucast_pkts); - buf[11] = ioread32(&mac->if_in_multicast_pkts); - buf[12] = ioread32(&mac->if_in_broadcast_pkts); - buf[13] = ioread32(&mac->if_out_discards); - buf[14] = ioread32(&mac->if_out_ucast_pkts); - buf[15] = ioread32(&mac->if_out_multicast_pkts); - buf[16] = ioread32(&mac->if_out_broadcast_pkts); - buf[17] = ioread32(&mac->ether_stats_drop_events); + buf[6] = csrrd32(priv->mac_dev, + tse_csroffs(tx_pause_mac_ctrl_frames)); + buf[7] = csrrd32(priv->mac_dev, + tse_csroffs(rx_pause_mac_ctrl_frames)); + buf[8] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_errors)); + buf[9] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_errors)); + buf[10] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_ucast_pkts)); + buf[11] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_multicast_pkts)); + buf[12] = csrrd32(priv->mac_dev, + tse_csroffs(if_in_broadcast_pkts)); + buf[13] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_discards)); + buf[14] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_ucast_pkts)); + buf[15] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_multicast_pkts)); + buf[16] = csrrd32(priv->mac_dev, + tse_csroffs(if_out_broadcast_pkts)); + buf[17] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_drop_events)); /* Extended etherStatsOctets counter */ - ext = (u64) ioread32(&mac->msb_ether_stats_octets) << 32; - ext |= ioread32(&mac->ether_stats_octets); + ext = (u64) csrrd32(priv->mac_dev, + tse_csroffs(msb_ether_stats_octets)) << 32; + ext |= csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_octets)); buf[18] = ext; - buf[19] = ioread32(&mac->ether_stats_pkts); - buf[20] = ioread32(&mac->ether_stats_undersize_pkts); - buf[21] = ioread32(&mac->ether_stats_oversize_pkts); - buf[22] = ioread32(&mac->ether_stats_pkts_64_octets); - buf[23] = ioread32(&mac->ether_stats_pkts_65to127_octets); - buf[24] = ioread32(&mac->ether_stats_pkts_128to255_octets); - buf[25] = ioread32(&mac->ether_stats_pkts_256to511_octets); - buf[26] = ioread32(&mac->ether_stats_pkts_512to1023_octets); - buf[27] = ioread32(&mac->ether_stats_pkts_1024to1518_octets); - buf[28] = 
ioread32(&mac->ether_stats_pkts_1519tox_octets); - buf[29] = ioread32(&mac->ether_stats_jabbers); - buf[30] = ioread32(&mac->ether_stats_fragments); + buf[19] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts)); + buf[20] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_undersize_pkts)); + buf[21] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_oversize_pkts)); + buf[22] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_64_octets)); + buf[23] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_65to127_octets)); + buf[24] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_128to255_octets)); + buf[25] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_256to511_octets)); + buf[26] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_512to1023_octets)); + buf[27] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1024to1518_octets)); + buf[28] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_pkts_1519tox_octets)); + buf[29] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_jabbers)); + buf[30] = csrrd32(priv->mac_dev, + tse_csroffs(ether_stats_fragments)); } static int tse_sset_count(struct net_device *dev, int sset) @@ -178,7 +213,6 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, { int i; struct altera_tse_private *priv = netdev_priv(dev); - u32 *tse_mac_regs = (u32 *)priv->mac_dev; u32 *buf = regbuf; /* Set version to a known value, so ethtool knows @@ -196,7 +230,7 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs, regs->version = 1; for (i = 0; i < TSE_NUM_REGS; i++) - buf[i] = ioread32(&tse_mac_regs[i]); + buf[i] = csrrd32(priv->mac_dev, i * 4); } static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index e44a4aeb9701..7330681574d2 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -100,29 +100,30 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv) */ static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { - struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; - unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; - u32 data; + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); /* set MDIO address */ - iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy0_addr)); /* get the data */ - data = ioread32(&mdio_regs[regnum]) & 0xffff; - return data; + return csrrd32(priv->mac_dev, + tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff; } static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { - struct altera_tse_mac *mac = (struct altera_tse_mac *)bus->priv; - unsigned int *mdio_regs = (unsigned int *)&mac->mdio_phy0; + struct net_device *ndev = bus->priv; + struct altera_tse_private *priv = netdev_priv(ndev); /* set MDIO address */ - iowrite32((mii_id & 0x1f), &mac->mdio_phy0_addr); + csrwr32((mii_id & 0x1f), priv->mac_dev, + tse_csroffs(mdio_phy0_addr)); /* write the data */ - iowrite32((u32) value, &mdio_regs[regnum]); + csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4); return 0; } @@ -168,7 +169,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) for (i = 0; i < PHY_MAX_ADDR; i++) mdio->irq[i] = PHY_POLL; - mdio->priv = priv->mac_dev; + mdio->priv = dev; mdio->parent = 
priv->device; ret = of_mdiobus_register(mdio, mdio_node); @@ -563,7 +564,6 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int nopaged_len = skb_headlen(skb); enum netdev_tx ret = NETDEV_TX_OK; dma_addr_t dma_addr; - int txcomplete = 0; spin_lock_bh(&priv->tx_lock); @@ -599,7 +599,7 @@ static int tse_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_sync_single_for_device(priv->device, buffer->dma_addr, buffer->len, DMA_TO_DEVICE); - txcomplete = priv->dmaops->tx_buffer(priv, buffer); + priv->dmaops->tx_buffer(priv, buffer); skb_tx_timestamp(skb); @@ -698,7 +698,6 @@ static struct phy_device *connect_local_phy(struct net_device *dev) struct altera_tse_private *priv = netdev_priv(dev); struct phy_device *phydev = NULL; char phy_id_fmt[MII_BUS_ID_SIZE + 3]; - int ret; if (priv->phy_addr != POLL_PHY) { snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, @@ -712,6 +711,7 @@ static struct phy_device *connect_local_phy(struct net_device *dev) netdev_err(dev, "Could not attach to PHY\n"); } else { + int ret; phydev = phy_find_first(priv->mdio); if (phydev == NULL) { netdev_err(dev, "No PHY found\n"); @@ -791,7 +791,6 @@ static int init_phy(struct net_device *dev) static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) { - struct altera_tse_mac *mac = priv->mac_dev; u32 msb; u32 lsb; @@ -799,8 +798,8 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) lsb = ((addr[5] << 8) | addr[4]) & 0xffff; /* Set primary MAC address */ - iowrite32(msb, &mac->mac_addr_0); - iowrite32(lsb, &mac->mac_addr_1); + csrwr32(msb, priv->mac_dev, tse_csroffs(mac_addr_0)); + csrwr32(lsb, priv->mac_dev, tse_csroffs(mac_addr_1)); } /* MAC software reset. @@ -811,26 +810,26 @@ static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr) */ static int reset_mac(struct altera_tse_private *priv) { - void __iomem *cmd_cfg_reg = &priv->mac_dev->command_config; int counter; u32 dat; - dat = ioread32(cmd_cfg_reg); + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); dat &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); dat |= MAC_CMDCFG_SW_RESET | MAC_CMDCFG_CNT_RESET; - iowrite32(dat, cmd_cfg_reg); + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); counter = 0; while (counter++ < ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - if (tse_bit_is_clear(cmd_cfg_reg, MAC_CMDCFG_SW_RESET)) + if (tse_bit_is_clear(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_SW_RESET)) break; udelay(1); } if (counter >= ALTERA_TSE_SW_RESET_WATCHDOG_CNTR) { - dat = ioread32(cmd_cfg_reg); + dat = csrrd32(priv->mac_dev, tse_csroffs(command_config)); dat &= ~MAC_CMDCFG_SW_RESET; - iowrite32(dat, cmd_cfg_reg); + csrwr32(dat, priv->mac_dev, tse_csroffs(command_config)); return -1; } return 0; @@ -840,41 +839,57 @@ static int reset_mac(struct altera_tse_private *priv) */ static int init_mac(struct altera_tse_private *priv) { - struct altera_tse_mac *mac = priv->mac_dev; unsigned int cmd = 0; u32 frm_length; /* Setup Rx FIFO */ - iowrite32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, - &mac->rx_section_empty); - iowrite32(ALTERA_TSE_RX_SECTION_FULL, &mac->rx_section_full); - iowrite32(ALTERA_TSE_RX_ALMOST_EMPTY, &mac->rx_almost_empty); - iowrite32(ALTERA_TSE_RX_ALMOST_FULL, &mac->rx_almost_full); + csrwr32(priv->rx_fifo_depth - ALTERA_TSE_RX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(rx_section_empty)); + + csrwr32(ALTERA_TSE_RX_SECTION_FULL, priv->mac_dev, + tse_csroffs(rx_section_full)); + + csrwr32(ALTERA_TSE_RX_ALMOST_EMPTY, 
priv->mac_dev, + tse_csroffs(rx_almost_empty)); + + csrwr32(ALTERA_TSE_RX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(rx_almost_full)); /* Setup Tx FIFO */ - iowrite32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, - &mac->tx_section_empty); - iowrite32(ALTERA_TSE_TX_SECTION_FULL, &mac->tx_section_full); - iowrite32(ALTERA_TSE_TX_ALMOST_EMPTY, &mac->tx_almost_empty); - iowrite32(ALTERA_TSE_TX_ALMOST_FULL, &mac->tx_almost_full); + csrwr32(priv->tx_fifo_depth - ALTERA_TSE_TX_SECTION_EMPTY, + priv->mac_dev, tse_csroffs(tx_section_empty)); + + csrwr32(ALTERA_TSE_TX_SECTION_FULL, priv->mac_dev, + tse_csroffs(tx_section_full)); + + csrwr32(ALTERA_TSE_TX_ALMOST_EMPTY, priv->mac_dev, + tse_csroffs(tx_almost_empty)); + + csrwr32(ALTERA_TSE_TX_ALMOST_FULL, priv->mac_dev, + tse_csroffs(tx_almost_full)); /* MAC Address Configuration */ tse_update_mac_addr(priv, priv->dev->dev_addr); /* MAC Function Configuration */ frm_length = ETH_HLEN + priv->dev->mtu + ETH_FCS_LEN; - iowrite32(frm_length, &mac->frm_length); - iowrite32(ALTERA_TSE_TX_IPG_LENGTH, &mac->tx_ipg_length); + csrwr32(frm_length, priv->mac_dev, tse_csroffs(frm_length)); + + csrwr32(ALTERA_TSE_TX_IPG_LENGTH, priv->mac_dev, + tse_csroffs(tx_ipg_length)); /* Disable RX/TX shift 16 for alignment of all received frames on 16-bit * start address */ - tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); - tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | - ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); + tse_set_bit(priv->mac_dev, tse_csroffs(rx_cmd_stat), + ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16); + + tse_clear_bit(priv->mac_dev, tse_csroffs(tx_cmd_stat), + ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 | + ALTERA_TSE_TX_CMD_STAT_OMIT_CRC); /* Set the MAC options */ - cmd = ioread32(&mac->command_config); + cmd = csrrd32(priv->mac_dev, tse_csroffs(command_config)); cmd &= ~MAC_CMDCFG_PAD_EN; /* No padding Removal on Receive */ cmd &= ~MAC_CMDCFG_CRC_FWD; /* CRC Removal */ cmd |= MAC_CMDCFG_RX_ERR_DISC; /* Automatically discard frames @@ -889,9 +904,10 @@ static int init_mac(struct altera_tse_private *priv) cmd &= ~MAC_CMDCFG_ETH_SPEED; cmd &= ~MAC_CMDCFG_ENA_10; - iowrite32(cmd, &mac->command_config); + csrwr32(cmd, priv->mac_dev, tse_csroffs(command_config)); - iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta); + csrwr32(ALTERA_TSE_PAUSE_QUANTA, priv->mac_dev, + tse_csroffs(pause_quanta)); if (netif_msg_hw(priv)) dev_dbg(priv->device, @@ -904,15 +920,14 @@ static int init_mac(struct altera_tse_private *priv) */ static void tse_set_mac(struct altera_tse_private *priv, bool enable) { - struct altera_tse_mac *mac = priv->mac_dev; - u32 value = ioread32(&mac->command_config); + u32 value = csrrd32(priv->mac_dev, tse_csroffs(command_config)); if (enable) value |= MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA; else value &= ~(MAC_CMDCFG_TX_ENA | MAC_CMDCFG_RX_ENA); - iowrite32(value, &mac->command_config); + csrwr32(value, priv->mac_dev, tse_csroffs(command_config)); } /* Change the MTU @@ -942,13 +957,12 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu) static void altera_tse_set_mcfilter(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; int i; struct netdev_hw_addr *ha; /* clear the hash filter */ for (i = 0; i < 64; i++) - iowrite32(0, &(mac->hash_table[i])); + csrwr32(0, priv->mac_dev, tse_csroffs(hash_table) + i * 4); netdev_for_each_mc_addr(ha, dev) { unsigned int hash = 0; @@ -964,7 +978,7 @@ static void altera_tse_set_mcfilter(struct net_device *dev) 
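The same accessor scheme extends to register arrays: element i of a u32 array register lives at the member's offsetof() plus i * 4, which is how the hash-table writes in this hunk (tse_csroffs(hash_table) + hash * 4) and the MDIO register file earlier (tse_csroffs(mdio_phy0) + regnum * 4) are addressed.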
hash = (hash << 1) | xor_bit; } - iowrite32(1, &(mac->hash_table[hash])); + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + hash * 4); } } @@ -972,12 +986,11 @@ static void altera_tse_set_mcfilter(struct net_device *dev) static void altera_tse_set_mcfilterall(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; int i; /* set the hash filter */ for (i = 0; i < 64; i++) - iowrite32(1, &(mac->hash_table[i])); + csrwr32(1, priv->mac_dev, tse_csroffs(hash_table) + i * 4); } /* Set or clear the multicast filter for this adaptor @@ -985,12 +998,12 @@ static void altera_tse_set_mcfilterall(struct net_device *dev) static void tse_set_rx_mode_hashfilter(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; spin_lock(&priv->mac_cfg_lock); if (dev->flags & IFF_PROMISC) - tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); if (dev->flags & IFF_ALLMULTI) altera_tse_set_mcfilterall(dev); @@ -1005,15 +1018,16 @@ static void tse_set_rx_mode_hashfilter(struct net_device *dev) static void tse_set_rx_mode(struct net_device *dev) { struct altera_tse_private *priv = netdev_priv(dev); - struct altera_tse_mac *mac = priv->mac_dev; spin_lock(&priv->mac_cfg_lock); if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev) || !netdev_uc_empty(dev)) - tse_set_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_set_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); else - tse_clear_bit(&mac->command_config, MAC_CMDCFG_PROMIS_EN); + tse_clear_bit(priv->mac_dev, tse_csroffs(command_config), + MAC_CMDCFG_PROMIS_EN); spin_unlock(&priv->mac_cfg_lock); } @@ -1362,6 +1376,11 @@ static int altera_tse_probe(struct platform_device *pdev) of_property_read_bool(pdev->dev.of_node, "altr,has-hash-multicast-filter"); + /* Set hash filter to not set for now until the + * multicast filter receive issue is debugged + */ + priv->hash_filter = 0; + /* get supplemental address settings for this instance */ priv->added_unicast = of_property_read_bool(pdev->dev.of_node, @@ -1493,7 +1512,7 @@ static int altera_tse_remove(struct platform_device *pdev) return 0; } -struct altera_dmaops altera_dtype_sgdma = { +static const struct altera_dmaops altera_dtype_sgdma = { .altera_dtype = ALTERA_DTYPE_SGDMA, .dmamask = 32, .reset_dma = sgdma_reset, @@ -1512,7 +1531,7 @@ struct altera_dmaops altera_dtype_sgdma = { .start_rxdma = sgdma_start_rxdma, }; -struct altera_dmaops altera_dtype_msgdma = { +static const struct altera_dmaops altera_dtype_msgdma = { .altera_dtype = ALTERA_DTYPE_MSGDMA, .dmamask = 64, .reset_dma = msgdma_reset, diff --git a/drivers/net/ethernet/altera/altera_utils.c b/drivers/net/ethernet/altera/altera_utils.c index 70fa13f486b2..d7eeb1713ad2 100644 --- a/drivers/net/ethernet/altera/altera_utils.c +++ b/drivers/net/ethernet/altera/altera_utils.c @@ -17,28 +17,28 @@ #include "altera_tse.h" #include "altera_utils.h" -void tse_set_bit(void __iomem *ioaddr, u32 bit_mask) +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); value |= bit_mask; - iowrite32(value, ioaddr); + csrwr32(value, ioaddr, offs); } -void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask) +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = 
csrrd32(ioaddr, offs); value &= ~bit_mask; - iowrite32(value, ioaddr); + csrwr32(value, ioaddr, offs); } -int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask) +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 1 : 0; } -int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask) +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask) { - u32 value = ioread32(ioaddr); + u32 value = csrrd32(ioaddr, offs); return (value & bit_mask) ? 0 : 1; } diff --git a/drivers/net/ethernet/altera/altera_utils.h b/drivers/net/ethernet/altera/altera_utils.h index ce1db36d3583..baf100ccf587 100644 --- a/drivers/net/ethernet/altera/altera_utils.h +++ b/drivers/net/ethernet/altera/altera_utils.h @@ -19,9 +19,9 @@ #ifndef __ALTERA_UTILS_H__ #define __ALTERA_UTILS_H__ -void tse_set_bit(void __iomem *ioaddr, u32 bit_mask); -void tse_clear_bit(void __iomem *ioaddr, u32 bit_mask); -int tse_bit_is_set(void __iomem *ioaddr, u32 bit_mask); -int tse_bit_is_clear(void __iomem *ioaddr, u32 bit_mask); +void tse_set_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +void tse_clear_bit(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_set(void __iomem *ioaddr, size_t offs, u32 bit_mask); +int tse_bit_is_clear(void __iomem *ioaddr, size_t offs, u32 bit_mask); #endif /* __ALTERA_UTILS_H__*/ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b260913db236..3b0d43154e67 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -10051,8 +10051,8 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp, #define BCM_5710_UNDI_FW_MF_MAJOR (0x07) #define BCM_5710_UNDI_FW_MF_MINOR (0x08) #define BCM_5710_UNDI_FW_MF_VERS (0x05) -#define BNX2X_PREV_UNDI_MF_PORT(p) (0x1a150c + ((p) << 4)) -#define BNX2X_PREV_UNDI_MF_FUNC(f) (0x1a184c + ((f) << 4)) +#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4)) +#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4)) static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp) { u8 major, minor, version; @@ -10352,6 +10352,7 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) /* Reset should be performed after BRB is emptied */ if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) { u32 timer_count = 1000; + bool need_write = true; /* Close the MAC Rx to prevent BRB from filling up */ bnx2x_prev_unload_close_mac(bp, &mac_vals); @@ -10398,7 +10399,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp) * cleaning methods - might be redundant but harmless. 
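The need_write flag introduced above ensures bnx2x_prev_unload_undi_mf() runs at most once on this path: the flag starts true, is cleared after the first write, and so the same TSTORM locations are not rewritten on every subsequent pass that reaches this cleanup.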
*/ if (bnx2x_prev_unload_undi_fw_supports_mf(bp)) { - bnx2x_prev_unload_undi_mf(bp); + if (need_write) { + bnx2x_prev_unload_undi_mf(bp); + need_write = false; + } } else if (prev_undi) { /* If UNDI resides in memory, * manually increment it */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 81cc2d9831c2..b8078d50261b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -2695,7 +2695,7 @@ out: bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); } - return 0; + return rc; } int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c index 0c067e8564dd..784c7155b98a 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c @@ -747,7 +747,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set) out: bnx2x_vfpf_finalize(bp, &req->first_tlv); - return 0; + return rc; } /* request pf to config rss table for vf queues*/ diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c new file mode 100644 index 000000000000..4884205e56ee --- /dev/null +++ b/drivers/net/ethernet/ec_bhf.c @@ -0,0 +1,706 @@ + /* + * drivers/net/ethernet/beckhoff/ec_bhf.c + * + * Copyright (C) 2014 Darek Marcinkiewicz <reksio@newterm.pl> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* This is a driver for the EtherCAT master module present on the CCAT FPGA. + * Those can be found on Beckhoff CX50xx industrial PCs.
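One design note on this new driver: it takes no interrupt at all. A 20 microsecond hrtimer (TIMER_INTERVAL_NSEC) polls both the RX and TX rings from ec_bhf_timer_fun(), which suits EtherCAT's fixed-cycle, master-driven traffic better than per-packet interrupts would. The period is exposed as the read-only module parameter polling_frequency (see module_param() at the end of the file), so a slower cycle can be chosen at load time, e.g. modprobe ec_bhf polling_frequency=100000 for a 100 microsecond poll.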
+ */ + +#if 0 +#define DEBUG +#endif +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/init.h> + +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/skbuff.h> +#include <linux/hrtimer.h> +#include <linux/interrupt.h> +#include <linux/stat.h> + +#define TIMER_INTERVAL_NSEC 20000 + +#define INFO_BLOCK_SIZE 0x10 +#define INFO_BLOCK_TYPE 0x0 +#define INFO_BLOCK_REV 0x2 +#define INFO_BLOCK_BLK_CNT 0x4 +#define INFO_BLOCK_TX_CHAN 0x4 +#define INFO_BLOCK_RX_CHAN 0x5 +#define INFO_BLOCK_OFFSET 0x8 + +#define EC_MII_OFFSET 0x4 +#define EC_FIFO_OFFSET 0x8 +#define EC_MAC_OFFSET 0xc + +#define MAC_FRAME_ERR_CNT 0x0 +#define MAC_RX_ERR_CNT 0x1 +#define MAC_CRC_ERR_CNT 0x2 +#define MAC_LNK_LST_ERR_CNT 0x3 +#define MAC_TX_FRAME_CNT 0x10 +#define MAC_RX_FRAME_CNT 0x14 +#define MAC_TX_FIFO_LVL 0x20 +#define MAC_DROPPED_FRMS 0x28 +#define MAC_CONNECTED_CCAT_FLAG 0x78 + +#define MII_MAC_ADDR 0x8 +#define MII_MAC_FILT_FLAG 0xe +#define MII_LINK_STATUS 0xf + +#define FIFO_TX_REG 0x0 +#define FIFO_TX_RESET 0x8 +#define FIFO_RX_REG 0x10 +#define FIFO_RX_ADDR_VALID (1u << 31) +#define FIFO_RX_RESET 0x18 + +#define DMA_CHAN_OFFSET 0x1000 +#define DMA_CHAN_SIZE 0x8 + +#define DMA_WINDOW_SIZE_MASK 0xfffffffc + +static struct pci_device_id ids[] = { + { PCI_DEVICE(0x15ec, 0x5000), }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ids); + +struct rx_header { +#define RXHDR_NEXT_ADDR_MASK 0xffffffu +#define RXHDR_NEXT_VALID (1u << 31) + __le32 next; +#define RXHDR_NEXT_RECV_FLAG 0x1 + __le32 recv; +#define RXHDR_LEN_MASK 0xfffu + __le16 len; + __le16 port; + __le32 reserved; + u8 timestamp[8]; +} __packed; + +#define PKT_PAYLOAD_SIZE 0x7e8 +struct rx_desc { + struct rx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +struct tx_header { + __le16 len; +#define TX_HDR_PORT_0 0x1 +#define TX_HDR_PORT_1 0x2 + u8 port; + u8 ts_enable; +#define TX_HDR_SENT 0x1 + __le32 sent; + u8 timestamp[8]; +} __packed; + +struct tx_desc { + struct tx_header header; + u8 data[PKT_PAYLOAD_SIZE]; +} __packed; + +#define FIFO_SIZE 64 + +static long polling_frequency = TIMER_INTERVAL_NSEC; + +struct bhf_dma { + u8 *buf; + size_t len; + dma_addr_t buf_phys; + + u8 *alloc; + size_t alloc_len; + dma_addr_t alloc_phys; +}; + +struct ec_bhf_priv { + struct net_device *net_dev; + + struct pci_dev *dev; + + void * __iomem io; + void * __iomem dma_io; + + struct hrtimer hrtimer; + + int tx_dma_chan; + int rx_dma_chan; + void * __iomem ec_io; + void * __iomem fifo_io; + void * __iomem mii_io; + void * __iomem mac_io; + + struct bhf_dma rx_buf; + struct rx_desc *rx_descs; + int rx_dnext; + int rx_dcount; + + struct bhf_dma tx_buf; + struct tx_desc *tx_descs; + int tx_dcount; + int tx_dnext; + + u64 stat_rx_bytes; + u64 stat_tx_bytes; +}; + +#define PRIV_TO_DEV(priv) (&(priv)->dev->dev) + +#define ETHERCAT_MASTER_ID 0x14 + +static void ec_bhf_print_status(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + + dev_dbg(dev, "Frame error counter: %d\n", + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT)); + dev_dbg(dev, "RX error counter: %d\n", + ioread8(priv->mac_io + MAC_RX_ERR_CNT)); + dev_dbg(dev, "CRC error counter: %d\n", + ioread8(priv->mac_io + MAC_CRC_ERR_CNT)); + dev_dbg(dev, "TX frame counter: %d\n", + ioread32(priv->mac_io + MAC_TX_FRAME_CNT)); + dev_dbg(dev, "RX frame counter: %d\n", + ioread32(priv->mac_io + MAC_RX_FRAME_CNT)); + dev_dbg(dev, "TX fifo level: %d\n", + ioread8(priv->mac_io + 
MAC_TX_FIFO_LVL)); + dev_dbg(dev, "Dropped frames: %d\n", + ioread8(priv->mac_io + MAC_DROPPED_FRMS)); + dev_dbg(dev, "Connected with CCAT slot: %d\n", + ioread8(priv->mac_io + MAC_CONNECTED_CCAT_FLAG)); + dev_dbg(dev, "Link status: %d\n", + ioread8(priv->mii_io + MII_LINK_STATUS)); +} + +static void ec_bhf_reset(struct ec_bhf_priv *priv) +{ + iowrite8(0, priv->mac_io + MAC_FRAME_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_RX_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_CRC_ERR_CNT); + iowrite8(0, priv->mac_io + MAC_LNK_LST_ERR_CNT); + iowrite32(0, priv->mac_io + MAC_TX_FRAME_CNT); + iowrite32(0, priv->mac_io + MAC_RX_FRAME_CNT); + iowrite8(0, priv->mac_io + MAC_DROPPED_FRMS); + + iowrite8(0, priv->fifo_io + FIFO_TX_RESET); + iowrite8(0, priv->fifo_io + FIFO_RX_RESET); + + iowrite8(0, priv->mac_io + MAC_TX_FIFO_LVL); +} + +static void ec_bhf_send_packet(struct ec_bhf_priv *priv, struct tx_desc *desc) +{ + u32 len = le16_to_cpu(desc->header.len) + sizeof(desc->header); + u32 addr = (u8 *)desc - priv->tx_buf.buf; + + iowrite32((ALIGN(len, 8) << 24) | addr, priv->fifo_io + FIFO_TX_REG); + + dev_dbg(PRIV_TO_DEV(priv), "Done sending packet\n"); +} + +static int ec_bhf_desc_sent(struct tx_desc *desc) +{ + return le32_to_cpu(desc->header.sent) & TX_HDR_SENT; +} + +static void ec_bhf_process_tx(struct ec_bhf_priv *priv) +{ + if (unlikely(netif_queue_stopped(priv->net_dev))) { + /* Make sure that we perceive changes to tx_dnext. */ + smp_rmb(); + + if (ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) + netif_wake_queue(priv->net_dev); + } +} + +static int ec_bhf_pkt_received(struct rx_desc *desc) +{ + return le32_to_cpu(desc->header.recv) & RXHDR_NEXT_RECV_FLAG; +} + +static void ec_bhf_add_rx_desc(struct ec_bhf_priv *priv, struct rx_desc *desc) +{ + iowrite32(FIFO_RX_ADDR_VALID | ((u8 *)(desc) - priv->rx_buf.buf), + priv->fifo_io + FIFO_RX_REG); +} + +static void ec_bhf_process_rx(struct ec_bhf_priv *priv) +{ + struct rx_desc *desc = &priv->rx_descs[priv->rx_dnext]; + struct device *dev = PRIV_TO_DEV(priv); + + while (ec_bhf_pkt_received(desc)) { + int pkt_size = (le16_to_cpu(desc->header.len) & + RXHDR_LEN_MASK) - sizeof(struct rx_header) - 4; + u8 *data = desc->data; + struct sk_buff *skb; + + skb = netdev_alloc_skb_ip_align(priv->net_dev, pkt_size); + dev_dbg(dev, "Received packet, size: %d\n", pkt_size); + + if (skb) { + memcpy(skb_put(skb, pkt_size), data, pkt_size); + skb->protocol = eth_type_trans(skb, priv->net_dev); + dev_dbg(dev, "Protocol type: %x\n", skb->protocol); + + priv->stat_rx_bytes += pkt_size; + + netif_rx(skb); + } else { + dev_err_ratelimited(dev, + "Couldn't allocate a sk_buff for a packet of size %u\n", + pkt_size); + } + + desc->header.recv = 0; + + ec_bhf_add_rx_desc(priv, desc); + + priv->rx_dnext = (priv->rx_dnext + 1) % priv->rx_dcount; + desc = &priv->rx_descs[priv->rx_dnext]; + } + +} + +static enum hrtimer_restart ec_bhf_timer_fun(struct hrtimer *timer) +{ + struct ec_bhf_priv *priv = container_of(timer, struct ec_bhf_priv, + hrtimer); + ec_bhf_process_rx(priv); + ec_bhf_process_tx(priv); + + if (!netif_running(priv->net_dev)) + return HRTIMER_NORESTART; + + hrtimer_forward_now(timer, ktime_set(0, polling_frequency)); + return HRTIMER_RESTART; +} + +static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv) +{ + struct device *dev = PRIV_TO_DEV(priv); + unsigned block_count, i; + void * __iomem ec_info; + + dev_dbg(dev, "Info block:\n"); + dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io)); + dev_dbg(dev, "Revision of function: %x\n", +
(unsigned)ioread16(priv->io + INFO_BLOCK_REV)); + + block_count = ioread8(priv->io + INFO_BLOCK_BLK_CNT); + dev_dbg(dev, "Number of function blocks: %x\n", block_count); + + for (i = 0; i < block_count; i++) { + u16 type = ioread16(priv->io + i * INFO_BLOCK_SIZE + + INFO_BLOCK_TYPE); + if (type == ETHERCAT_MASTER_ID) + break; + } + if (i == block_count) { + dev_err(dev, "EtherCAT master with DMA block not found\n"); + return -ENODEV; + } + dev_dbg(dev, "EtherCAT master with DMA block found at pos: %d\n", i); + + ec_info = priv->io + i * INFO_BLOCK_SIZE; + dev_dbg(dev, "EtherCAT master revision: %d\n", + ioread16(ec_info + INFO_BLOCK_REV)); + + priv->tx_dma_chan = ioread8(ec_info + INFO_BLOCK_TX_CHAN); + dev_dbg(dev, "EtherCAT master tx dma channel: %d\n", + priv->tx_dma_chan); + + priv->rx_dma_chan = ioread8(ec_info + INFO_BLOCK_RX_CHAN); + dev_dbg(dev, "EtherCAT master rx dma channel: %d\n", + priv->rx_dma_chan); + + priv->ec_io = priv->io + ioread32(ec_info + INFO_BLOCK_OFFSET); + priv->mii_io = priv->ec_io + ioread32(priv->ec_io + EC_MII_OFFSET); + priv->fifo_io = priv->ec_io + ioread32(priv->ec_io + EC_FIFO_OFFSET); + priv->mac_io = priv->ec_io + ioread32(priv->ec_io + EC_MAC_OFFSET); + + dev_dbg(dev, + "EtherCAT block address: %p, fifo address: %p, mii address: %p, mac address: %p\n", + priv->ec_io, priv->fifo_io, priv->mii_io, priv->mac_io); + + return 0; +} + +static netdev_tx_t ec_bhf_start_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct tx_desc *desc; + unsigned len; + + dev_dbg(PRIV_TO_DEV(priv), "Starting xmit\n"); + + desc = &priv->tx_descs[priv->tx_dnext]; + + skb_copy_and_csum_dev(skb, desc->data); + len = skb->len; + + memset(&desc->header, 0, sizeof(desc->header)); + desc->header.len = cpu_to_le16(len); + desc->header.port = TX_HDR_PORT_0; + + ec_bhf_send_packet(priv, desc); + + priv->tx_dnext = (priv->tx_dnext + 1) % priv->tx_dcount; + + if (!ec_bhf_desc_sent(&priv->tx_descs[priv->tx_dnext])) { + /* Make sure that updates to tx_dnext are perceived + * by the timer routine. + */ + smp_wmb(); + + netif_stop_queue(net_dev); + + dev_dbg(PRIV_TO_DEV(priv), "Stopping netif queue\n"); + ec_bhf_print_status(priv); + } + + priv->stat_tx_bytes += len; + + dev_kfree_skb(skb); + + return NETDEV_TX_OK; +} + +static int ec_bhf_alloc_dma_mem(struct ec_bhf_priv *priv, + struct bhf_dma *buf, + int channel, + int size) +{ + int offset = channel * DMA_CHAN_SIZE + DMA_CHAN_OFFSET; + struct device *dev = PRIV_TO_DEV(priv); + u32 mask; + + iowrite32(0xffffffff, priv->dma_io + offset); + + mask = ioread32(priv->dma_io + offset); + mask &= DMA_WINDOW_SIZE_MASK; + dev_dbg(dev, "Read mask %x for channel %d\n", mask, channel); + + /* We want to allocate a chunk of memory that is: + * - aligned to the mask we just read + * - is of size 2^mask bytes (at most) + * To ensure that, we allocate a buffer of + * 2 * 2^mask bytes.
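The doubled allocation is the classic align-by-overallocating trick: the DMA window register only honours address bits covered by the mask it reports, so the buffer's bus address must be aligned accordingly, and reserving twice the window size guarantees an aligned window fits somewhere inside the allocation. The arithmetic, as a standalone sketch (align_window and the mask value are illustrative, not part of the driver):

#include <linux/types.h>

/* mask as read back from the window register, e.g. 0xfffff000
 * for a hypothetical 4 KiB window; len is the window size
 */
static dma_addr_t align_window(dma_addr_t alloc_phys, size_t len, u32 mask)
{
	/* adding len and masking lands on the first aligned
	 * address at or past alloc_phys, still inside the
	 * 2 * len allocation
	 */
	return (alloc_phys + len) & mask;
}

/* example: alloc_phys = 0x12345678, len = 0x1000, mask = 0xfffff000
 * gives window base 0x12346000, i.e. offset 0x988 into the allocation
 */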
+ */ + buf->len = min_t(int, ~mask + 1, size); + buf->alloc_len = 2 * buf->len; + + dev_dbg(dev, "Allocating %d bytes for channel %d", + (int)buf->alloc_len, channel); + buf->alloc = dma_alloc_coherent(dev, buf->alloc_len, &buf->alloc_phys, + GFP_KERNEL); + if (buf->alloc == NULL) { + dev_info(dev, "Failed to allocate buffer\n"); + return -ENOMEM; + } + + buf->buf_phys = (buf->alloc_phys + buf->len) & mask; + buf->buf = buf->alloc + (buf->buf_phys - buf->alloc_phys); + + iowrite32(0, priv->dma_io + offset + 4); + iowrite32(buf->buf_phys, priv->dma_io + offset); + dev_dbg(dev, "Buffer: %x and read from dev: %x", + (unsigned)buf->buf_phys, ioread32(priv->dma_io + offset)); + + return 0; +} + +static void ec_bhf_setup_tx_descs(struct ec_bhf_priv *priv) +{ + int i = 0; + + priv->tx_dcount = priv->tx_buf.len / sizeof(struct tx_desc); + priv->tx_descs = (struct tx_desc *) priv->tx_buf.buf; + priv->tx_dnext = 0; + + for (i = 0; i < priv->tx_dcount; i++) + priv->tx_descs[i].header.sent = cpu_to_le32(TX_HDR_SENT); +} + +static void ec_bhf_setup_rx_descs(struct ec_bhf_priv *priv) +{ + int i; + + priv->rx_dcount = priv->rx_buf.len / sizeof(struct rx_desc); + priv->rx_descs = (struct rx_desc *) priv->rx_buf.buf; + priv->rx_dnext = 0; + + for (i = 0; i < priv->rx_dcount; i++) { + struct rx_desc *desc = &priv->rx_descs[i]; + u32 next; + + if (i != priv->rx_dcount - 1) + next = (u8 *)(desc + 1) - priv->rx_buf.buf; + else + next = 0; + next |= RXHDR_NEXT_VALID; + desc->header.next = cpu_to_le32(next); + desc->header.recv = 0; + ec_bhf_add_rx_desc(priv, desc); + } +} + +static int ec_bhf_open(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + int err = 0; + + dev_info(dev, "Opening device\n"); + + ec_bhf_reset(priv); + + err = ec_bhf_alloc_dma_mem(priv, &priv->rx_buf, priv->rx_dma_chan, + FIFO_SIZE * sizeof(struct rx_desc)); + if (err) { + dev_err(dev, "Failed to allocate rx buffer\n"); + goto out; + } + ec_bhf_setup_rx_descs(priv); + + dev_info(dev, "RX buffer allocated, address: %x\n", + (unsigned)priv->rx_buf.buf_phys); + + err = ec_bhf_alloc_dma_mem(priv, &priv->tx_buf, priv->tx_dma_chan, + FIFO_SIZE * sizeof(struct tx_desc)); + if (err) { + dev_err(dev, "Failed to allocate tx buffer\n"); + goto error_rx_free; + } + dev_dbg(dev, "TX buffer allocated, address: %x\n", + (unsigned)priv->tx_buf.buf_phys); + + iowrite8(0, priv->mii_io + MII_MAC_FILT_FLAG); + + ec_bhf_setup_tx_descs(priv); + + netif_start_queue(net_dev); + + hrtimer_init(&priv->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + priv->hrtimer.function = ec_bhf_timer_fun; + hrtimer_start(&priv->hrtimer, ktime_set(0, polling_frequency), + HRTIMER_MODE_REL); + + dev_info(PRIV_TO_DEV(priv), "Device open\n"); + + ec_bhf_print_status(priv); + + return 0; + +error_rx_free: + dma_free_coherent(dev, priv->rx_buf.alloc_len, priv->rx_buf.alloc, + priv->rx_buf.alloc_phys); +out: + return err; +} + +static int ec_bhf_stop(struct net_device *net_dev) +{ + struct ec_bhf_priv *priv = netdev_priv(net_dev); + struct device *dev = PRIV_TO_DEV(priv); + + hrtimer_cancel(&priv->hrtimer); + + ec_bhf_reset(priv); + + netif_tx_disable(net_dev); + + dma_free_coherent(dev, priv->tx_buf.alloc_len, + priv->tx_buf.alloc, priv->tx_buf.alloc_phys); + dma_free_coherent(dev, priv->rx_buf.alloc_len, + priv->rx_buf.alloc, priv->rx_buf.alloc_phys); + + return 0; +} + +static struct rtnl_link_stats64 * +ec_bhf_get_stats(struct net_device *net_dev, + struct rtnl_link_stats64 *stats) +{ + struct ec_bhf_priv *priv
= netdev_priv(net_dev); + + stats->rx_errors = ioread8(priv->mac_io + MAC_RX_ERR_CNT) + + ioread8(priv->mac_io + MAC_CRC_ERR_CNT) + + ioread8(priv->mac_io + MAC_FRAME_ERR_CNT); + stats->rx_packets = ioread32(priv->mac_io + MAC_RX_FRAME_CNT); + stats->tx_packets = ioread32(priv->mac_io + MAC_TX_FRAME_CNT); + stats->rx_dropped = ioread8(priv->mac_io + MAC_DROPPED_FRMS); + + stats->tx_bytes = priv->stat_tx_bytes; + stats->rx_bytes = priv->stat_rx_bytes; + + return stats; +} + +static const struct net_device_ops ec_bhf_netdev_ops = { + .ndo_start_xmit = ec_bhf_start_xmit, + .ndo_open = ec_bhf_open, + .ndo_stop = ec_bhf_stop, + .ndo_get_stats64 = ec_bhf_get_stats, + .ndo_change_mtu = eth_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr +}; + +static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct net_device *net_dev; + struct ec_bhf_priv *priv; + void * __iomem dma_io; + void * __iomem io; + int err = 0; + + err = pci_enable_device(dev); + if (err) + return err; + + pci_set_master(dev); + + err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + err = -EIO; + goto err_disable_dev; + } + + err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&dev->dev, + "Required dma mask not supported, failed to initialize device\n"); + goto err_disable_dev; + } + + err = pci_request_regions(dev, "ec_bhf"); + if (err) { + dev_err(&dev->dev, "Failed to request pci memory regions\n"); + goto err_disable_dev; + } + + io = pci_iomap(dev, 0, 0); + if (!io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 0"); + err = -EIO; + goto err_release_regions; + } + + dma_io = pci_iomap(dev, 2, 0); + if (!dma_io) { + dev_err(&dev->dev, "Failed to map pci card memory bar 2"); + err = -EIO; + goto err_unmap; + } + + net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv)); + if (net_dev == 0) { + err = -ENOMEM; + goto err_unmap_dma_io; + } + + pci_set_drvdata(dev, net_dev); + SET_NETDEV_DEV(net_dev, &dev->dev); + + net_dev->features = 0; + net_dev->flags |= IFF_NOARP; + + net_dev->netdev_ops = &ec_bhf_netdev_ops; + + priv = netdev_priv(net_dev); + priv->net_dev = net_dev; + priv->io = io; + priv->dma_io = dma_io; + priv->dev = dev; + + err = ec_bhf_setup_offsets(priv); + if (err < 0) + goto err_free_net_dev; + + memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6); + + dev_dbg(&dev->dev, "CX5020 Ethercat master address: %pM\n", + net_dev->dev_addr); + + err = register_netdev(net_dev); + if (err < 0) + goto err_free_net_dev; + + return 0; + +err_free_net_dev: + free_netdev(net_dev); +err_unmap_dma_io: + pci_iounmap(dev, dma_io); +err_unmap: + pci_iounmap(dev, io); +err_release_regions: + pci_release_regions(dev); +err_disable_dev: + pci_clear_master(dev); + pci_disable_device(dev); + + return err; +} + +static void ec_bhf_remove(struct pci_dev *dev) +{ + struct net_device *net_dev = pci_get_drvdata(dev); + struct ec_bhf_priv *priv = netdev_priv(net_dev); + + unregister_netdev(net_dev); + free_netdev(net_dev); + + pci_iounmap(dev, priv->dma_io); + pci_iounmap(dev, priv->io); + pci_release_regions(dev); + pci_clear_master(dev); + pci_disable_device(dev); +} + +static struct pci_driver pci_driver = { + .name = "ec_bhf", + .id_table = ids, + .probe = ec_bhf_probe, + .remove = ec_bhf_remove, +}; + +static int __init ec_bhf_init(void) +{ + return pci_register_driver(&pci_driver); +} + +static void __exit 
ec_bhf_exit(void) +{ + pci_unregister_driver(&pci_driver); +} + +module_init(ec_bhf_init); +module_exit(ec_bhf_exit); + +module_param(polling_frequency, long, S_IRUGO); +MODULE_PARM_DESC(polling_frequency, "Polling timer frequency in ns"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Dariusz Marcinkiewicz <reksio@newterm.pl>"); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index a18645407d21..dc19bc5dec77 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4949,6 +4949,12 @@ static void be_eeh_resume(struct pci_dev *pdev) if (status) goto err; + /* On some BE3 FW versions, after a HW reset, + * interrupts will remain disabled for each function. + * So, explicitly enable interrupts + */ + be_intr_set(adapter, true); + /* tell fw we're ready to fire cmds */ status = be_cmd_fw_init(adapter); if (status) diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index b0c6050479eb..b78378cea5e3 100644 --- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -1988,7 +1988,7 @@ jme_alloc_txdesc(struct jme_adapter *jme, return idx; } -static void +static int jme_fill_tx_map(struct pci_dev *pdev, struct txdesc *txdesc, struct jme_buffer_info *txbi, @@ -2005,6 +2005,9 @@ jme_fill_tx_map(struct pci_dev *pdev, len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) + return -EINVAL; + pci_dma_sync_single_for_device(pdev, dmaaddr, len, @@ -2021,9 +2024,30 @@ jme_fill_tx_map(struct pci_dev *pdev, txbi->mapping = dmaaddr; txbi->len = len; + return 0; } -static void +static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) +{ + struct jme_ring *txring = &(jme->txring[0]); + struct jme_buffer_info *txbi = txring->bufinf, *ctxbi; + int mask = jme->tx_ring_mask; + int j; + + for (j = 0 ; j < count ; j++) { + ctxbi = txbi + ((startidx + j + 2) & (mask)); + pci_unmap_page(jme->pdev, + ctxbi->mapping, + ctxbi->len, + PCI_DMA_TODEVICE); + + ctxbi->mapping = 0; + ctxbi->len = 0; + } + +} + +static int jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) { struct jme_ring *txring = &(jme->txring[0]); @@ -2034,25 +2058,37 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx) int mask = jme->tx_ring_mask; const struct skb_frag_struct *frag; u32 len; + int ret = 0; for (i = 0 ; i < nr_frags ; ++i) { frag = &skb_shinfo(skb)->frags[i]; ctxdesc = txdesc + ((idx + i + 2) & (mask)); ctxbi = txbi + ((idx + i + 2) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, skb_frag_page(frag), frag->page_offset, skb_frag_size(frag), hidma); + if (ret) { + jme_drop_tx_map(jme, idx, i); + goto out; + } + } len = skb_is_nonlinear(skb) ? 
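/* The jme change above turns jme_fill_tx_map() into a function that
 * reports pci_dma_mapping_error(), and adds jme_drop_tx_map() so that
 * a failure part-way through a multi-fragment skb unmaps whatever was
 * already mapped instead of leaking it. The general unwind pattern, as
 * a sketch with a hypothetical map_frags() helper (not part of jme):
 *
 *	for (i = 0; i < nr; i++) {
 *		addr[i] = dma_map_page(dev, pages[i], 0, len, DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, addr[i]))
 *			goto unwind;
 *	}
 *	return 0;
 * unwind:
 *	while (--i >= 0)
 *		dma_unmap_page(dev, addr[i], len, DMA_TO_DEVICE);
 *	return -ENOMEM;
 */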
skb_headlen(skb) : skb->len; ctxdesc = txdesc + ((idx + 1) & (mask)); ctxbi = txbi + ((idx + 1) & (mask)); - jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), + ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, virt_to_page(skb->data), offset_in_page(skb->data), len, hidma); + if (ret) + jme_drop_tx_map(jme, idx, i); + +out: + return ret; } + static int jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags) { @@ -2131,6 +2167,7 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) struct txdesc *txdesc; struct jme_buffer_info *txbi; u8 flags; + int ret = 0; txdesc = (struct txdesc *)txring->desc + idx; txbi = txring->bufinf + idx; @@ -2155,7 +2192,10 @@ jme_fill_tx_desc(struct jme_adapter *jme, struct sk_buff *skb, int idx) if (jme_tx_tso(skb, &txdesc->desc1.mss, &flags)) jme_tx_csum(jme, skb, &flags); jme_tx_vlan(skb, &txdesc->desc1.vlan, &flags); - jme_map_tx_skb(jme, skb, idx); + ret = jme_map_tx_skb(jme, skb, idx); + if (ret) + return ret; + txdesc->desc1.flags = flags; /* * Set tx buffer info after telling NIC to send @@ -2228,7 +2268,8 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev) return NETDEV_TX_BUSY; } - jme_fill_tx_desc(jme, skb, idx); + if (jme_fill_tx_desc(jme, skb, idx)) + return NETDEV_TX_OK; jwrite32(jme, JME_TXCS, jme->reg_txcs | TXCS_SELECT_QUEUE0 | diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 78099eab7673..92d3249f63f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1253,12 +1253,12 @@ static struct mlx4_cmd_info cmd_info[] = { }, { .opcode = MLX4_CMD_UPDATE_QP, - .has_inbox = false, + .has_inbox = true, .has_outbox = false, .out_is_imm = false, .encode_slave_id = false, .verify = NULL, - .wrapper = mlx4_CMD_EPERM_wrapper + .wrapper = mlx4_UPDATE_QP_wrapper }, { .opcode = MLX4_CMD_GET_OP_REQ, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index f9c465101963..212cea440f90 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -1195,6 +1195,12 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); + int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 61d64ebffd56..fbd32af89c7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -389,6 +389,41 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); +#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC +int mlx4_update_qp(struct mlx4_dev *dev, struct mlx4_qp *qp, + enum mlx4_update_qp_attr attr, + struct mlx4_update_qp_params *params) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_update_qp_context *cmd; + u64 pri_addr_path_mask = 0; + int err = 0; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + cmd = (struct mlx4_update_qp_context *)mailbox->buf; + + if (!attr || (attr & ~MLX4_UPDATE_QP_SUPPORTED_ATTRS)) + return -EINVAL; + + if (attr & MLX4_UPDATE_QP_SMAC) { + pri_addr_path_mask |= 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX; + 
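/* Two things travel to the firmware in this mailbox: the
 * primary_addr_path_mask word marks which address-path fields are
 * valid (only the MAC-index bit is supported here), and the SMAC index
 * itself rides in qp_context.pri_path.grh_mylmc, set just below. The
 * mlx4_UPDATE_QP_wrapper() added in resource_tracker.c re-validates
 * both before letting a VF-originated request through.
 */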
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 1c3fdd4a1f7d..8f1254a79832 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3895,6 +3895,60 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
 
 }
 
+#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
+int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+			   struct mlx4_vhcr *vhcr,
+			   struct mlx4_cmd_mailbox *inbox,
+			   struct mlx4_cmd_mailbox *outbox,
+			   struct mlx4_cmd_info *cmd_info)
+{
+	int err;
+	u32 qpn = vhcr->in_modifier & 0xffffff;
+	struct res_qp *rqp;
+	u64 mac;
+	unsigned port;
+	u64 pri_addr_path_mask;
+	struct mlx4_update_qp_context *cmd;
+	int smac_index;
+
+	cmd = (struct mlx4_update_qp_context *)inbox->buf;
+
+	pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
+	if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
+	    (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
+		return -EPERM;
+
+	/* Just change the smac for the QP */
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
+	if (err) {
+		mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
+		return err;
+	}
+
+	port = (rqp->sched_queue >> 6 & 1) + 1;
+	smac_index = cmd->qp_context.pri_path.grh_mylmc;
+	err = mac_find_smac_ix_in_slave(dev, slave, port,
+					smac_index, &mac);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
+			 qpn, smac_index);
+		goto err_mac;
+	}
+
+	err = mlx4_cmd(dev, inbox->dma,
+		       vhcr->in_modifier, 0,
+		       MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
+		       MLX4_CMD_NATIVE);
+	if (err) {
+		mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
+		goto err_mac;
+	}
+
+err_mac:
+	put_res(dev, slave, qpn, RES_QP);
+	return err;
+}
+
 int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_vhcr *vhcr,
 					 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7b52a88923ef..f785d01c7d12 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1719,22 +1719,6 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
 				tx_ring->producer;
 }
 
-static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
-					     struct net_device *netdev)
-{
-	int err;
-
-	netdev->num_tx_queues = adapter->drv_tx_rings;
-	netdev->real_num_tx_queues = adapter->drv_tx_rings;
-
-	err = netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
-	if (err)
-		netdev_err(netdev, "failed to set %d Tx queues\n",
-			   adapter->drv_tx_rings);
-
-	return err;
-}
-
 struct qlcnic_nic_template {
 	int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
 	int (*config_led) (struct qlcnic_adapter *, u32, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bc914859e38..7e55e88a81bf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2206,6 +2206,31 @@ static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
 	ahw->max_uc_count = count;
 }
 
+static int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+				      u8 tx_queues, u8 rx_queues)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (tx_queues) {
+		err = netif_set_real_num_tx_queues(netdev, tx_queues);
+		if (err) {
+			netdev_err(netdev, "failed to set %d Tx queues\n",
+				   tx_queues);
+			return err;
+		}
+	}
+
+	if (rx_queues) {
+		err = netif_set_real_num_rx_queues(netdev, rx_queues);
+		if (err)
+			netdev_err(netdev, "failed to set %d Rx queues\n",
+				   rx_queues);
+	}
+
+	return err;
+}
+
 int
 qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 		    int pci_using_dac)
@@ -2269,7 +2294,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 	netdev->irq = adapter->msix_entries[0].vector;
 
-	err = qlcnic_set_real_num_queues(adapter, netdev);
+	err = qlcnic_set_real_num_queues(adapter, adapter->drv_tx_rings,
+					 adapter->drv_sds_rings);
 	if (err)
 		return err;
 
@@ -2943,9 +2969,13 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
 			    tx_ring->tx_stats.xmit_called,
 			    tx_ring->tx_stats.xmit_on,
 			    tx_ring->tx_stats.xmit_off);
+
+		if (tx_ring->crb_intr_mask)
+			netdev_info(netdev, "crb_intr_mask=%d\n",
+				    readl(tx_ring->crb_intr_mask));
+
 		netdev_info(netdev,
-			    "crb_intr_mask=%d, hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
-			    readl(tx_ring->crb_intr_mask),
+			    "hw_producer=%d, sw_producer=%d sw_consumer=%d, hw_consumer=%d\n",
 			    readl(tx_ring->crb_cmd_producer),
 			    tx_ring->producer, tx_ring->sw_consumer,
 			    le32_to_cpu(*(tx_ring->hw_consumer)));
@@ -3978,12 +4008,21 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
 int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 {
 	struct net_device *netdev = adapter->netdev;
+	u8 tx_rings, rx_rings;
 	int err;
 
 	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
 		return -EBUSY;
 
+	tx_rings = adapter->drv_tss_rings;
+	rx_rings = adapter->drv_rss_rings;
+
 	netif_device_detach(netdev);
+
+	err = qlcnic_set_real_num_queues(adapter, tx_rings, rx_rings);
+	if (err)
+		goto done;
+
 	if (netif_running(netdev))
 		__qlcnic_down(adapter, netdev);
 
@@ -4003,7 +4042,17 @@ int qlcnic_setup_rings(struct qlcnic_adapter *adapter)
 		return err;
 	}
 
-	netif_set_real_num_tx_queues(netdev, adapter->drv_tx_rings);
+	/* Check if we need to update real_num_{tx|rx}_queues because
+	 * qlcnic_setup_intr() may change Tx/Rx rings size
+	 */
+	if ((tx_rings != adapter->drv_tx_rings) ||
+	    (rx_rings != adapter->drv_sds_rings)) {
+		err = qlcnic_set_real_num_queues(adapter,
+						 adapter->drv_tx_rings,
+						 adapter->drv_sds_rings);
+		if (err)
+			goto done;
+	}
 
 	if (qlcnic_83xx_check(adapter)) {
 		qlcnic_83xx_initialize_nic(adapter, 1);
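The replacement helper above moves the queue bookkeeping onto the core netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() calls, which cap the number of queues actually in use at or below the count allocated with alloc_etherdev_mq(), instead of poking netdev fields directly. A stripped-down version of the same pattern (driver and function names hypothetical):

	#include <linux/netdevice.h>

	/* Tell the stack how many of the allocated queues are active. */
	static int my_driver_set_queues(struct net_device *netdev,
					unsigned int txq, unsigned int rxq)
	{
		int err;

		err = netif_set_real_num_tx_queues(netdev, txq);
		if (err)
			return err;

		return netif_set_real_num_rx_queues(netdev, rxq);
	}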
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 32d969e857f7..89b83e59e1dc 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -156,13 +156,15 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
 	efx->net_dev->rx_cpu_rmap = NULL;
 #endif
 
-	/* Disable MSI/MSI-X interrupts */
-	efx_for_each_channel(channel, efx)
-		free_irq(channel->irq, &efx->msi_context[channel->channel]);
-
-	/* Disable legacy interrupt */
-	if (efx->legacy_irq)
+	if (EFX_INT_MODE_USE_MSI(efx)) {
+		/* Disable MSI/MSI-X interrupts */
+		efx_for_each_channel(channel, efx)
+			free_irq(channel->irq,
+				 &efx->msi_context[channel->channel]);
+	} else {
+		/* Disable legacy interrupt */
 		free_irq(efx->legacy_irq, efx);
+	}
 }
 
 /* Register dump */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d940034acdd4..0f4841d2e8dc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1704,7 +1704,7 @@ static int stmmac_open(struct net_device *dev)
 		if (ret) {
 			pr_err("%s: Cannot attach to PHY (error: %d)\n",
 			       __func__, ret);
-			goto phy_error;
+			return ret;
 		}
 	}
 
@@ -1779,8 +1779,6 @@ init_error:
 dma_desc_error:
 	if (priv->phydev)
 		phy_disconnect(priv->phydev);
-phy_error:
-	clk_disable_unprepare(priv->stmmac_clk);
 
 	return ret;
 }
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index df8d383acf48..b9ac20f42651 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -246,7 +246,7 @@ static inline void cas_lock_tx(struct cas *cp)
 	int i;
 
 	for (i = 0; i < N_TX_RINGS; i++)
-		spin_lock(&cp->tx_lock[i]);
+		spin_lock_nested(&cp->tx_lock[i], i);
}
 
 static inline void cas_lock_all(struct cas *cp)
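The cassini change teaches lockdep that taking every lock in tx_lock[] in ascending index order is intentional rather than a deadlock: spin_lock_nested() gives each acquisition a distinct subclass. A minimal sketch of the same idiom (names hypothetical; the subclass must stay below MAX_LOCKDEP_SUBCLASSES):

	#include <linux/spinlock.h>

	#define NR_RINGS 4
	/* Assume each lock is set up with spin_lock_init() at probe time. */
	static spinlock_t ring_lock[NR_RINGS];

	/* Take all ring locks in a fixed order; subclass 'i' tells lockdep
	 * these are distinct nesting levels of one lock class. */
	static void lock_all_rings(void)
	{
		int i;

		for (i = 0; i < NR_RINGS; i++)
			spin_lock_nested(&ring_lock[i], i);
	}

	static void unlock_all_rings(void)
	{
		int i;

		for (i = NR_RINGS - 1; i >= 0; i--)
			spin_unlock(&ring_lock[i]);
	}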
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 36aa109416c4..c331b7ebc812 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1871,18 +1871,13 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
 		mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
 		phyid = be32_to_cpup(parp+1);
 		mdio = of_find_device_by_node(mdio_node);
-
-		if (strncmp(mdio->name, "gpio", 4) == 0) {
-			/* GPIO bitbang MDIO driver attached */
-			struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
-
-			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-				 PHY_ID_FMT, bus->id, phyid);
-		} else {
-			/* davinci MDIO driver attached */
-			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-				 PHY_ID_FMT, mdio->name, phyid);
+		of_node_put(mdio_node);
+		if (!mdio) {
+			pr_err("Missing mdio platform device\n");
+			return -EINVAL;
 		}
+		snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+			 PHY_ID_FMT, mdio->name, phyid);
 
 		mac_addr = of_get_mac_address(slave_node);
 		if (mac_addr)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index b0e2865a6810..d53e299ae1d9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -458,8 +458,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
-	if (change & IFF_ALLMULTI)
-		dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	if (dev->flags & IFF_UP) {
+		if (change & IFF_ALLMULTI)
+			dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+	}
 }
 
 static void macvlan_set_mac_lists(struct net_device *dev)
@@ -515,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
 	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+	return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
 					  struct netdev_queue *txq,
 					  void *_unused)
@@ -525,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-	lockdep_set_class(&dev->addr_list_lock,
-			  &macvlan_netdev_addr_lock_key);
+	lockdep_set_class_and_subclass(&dev->addr_list_lock,
+				       &macvlan_netdev_addr_lock_key,
+				       macvlan_get_nest_level(dev));
 	netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -721,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
 	.ndo_fdb_add		= macvlan_fdb_add,
 	.ndo_fdb_del		= macvlan_fdb_del,
 	.ndo_fdb_dump		= ndo_dflt_fdb_dump,
+	.ndo_get_lock_subclass	= macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -849,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 	vlan->dev      = dev;
 	vlan->port     = port;
 	vlan->set_features = MACVLAN_FEATURES;
+	vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
 	vlan->mode     = MACVLAN_MODE_VEPA;
 	if (data && data[IFLA_MACVLAN_MODE])
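For macvlan-on-macvlan stacks, one static lockdep key is no longer enough: each layer's addr_list_lock needs its own subclass, which is what lockdep_set_class_and_subclass() plus the per-device nest_level achieves above. A sketch of the idea for any stackable virtual device (struct, key, and function names hypothetical):

	#include <linux/netdevice.h>
	#include <linux/lockdep.h>

	struct my_priv {
		int nest_level;	/* 0 on a physical lowerdev, +1 per layer */
	};

	static struct lock_class_key my_addr_lock_key;

	static int my_get_nest_level(struct net_device *dev)
	{
		return ((struct my_priv *)netdev_priv(dev))->nest_level;
	}

	static void my_set_lockdep_class(struct net_device *dev)
	{
		/* Distinct subclass per stacking depth, so lockdep accepts
		 * upper-dev lock taken while the lower-dev lock is held. */
		lockdep_set_class_and_subclass(&dev->addr_list_lock,
					       &my_addr_lock_key,
					       my_get_nest_level(dev));
	}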
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 9c4defdec67b..5f1a2250018f 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -215,6 +215,10 @@ static int mdio_gpio_probe(struct platform_device *pdev)
 	if (pdev->dev.of_node) {
 		pdata = mdio_gpio_of_get_data(pdev);
 		bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+		if (bus_id < 0) {
+			dev_warn(&pdev->dev, "failed to get alias id\n");
+			bus_id = 0;
+		}
 	} else {
 		pdata = dev_get_platdata(&pdev->dev);
 		bus_id = pdev->id;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a972056b2249..3bc079a67a3d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -715,7 +715,7 @@ void phy_state_machine(struct work_struct *work)
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct phy_device *phydev =
 			container_of(dwork, struct phy_device, state_queue);
-	int needs_aneg = 0, do_suspend = 0;
+	bool needs_aneg = false, do_suspend = false, do_resume = false;
 	int err = 0;
 
 	mutex_lock(&phydev->lock);
@@ -727,7 +727,7 @@ void phy_state_machine(struct work_struct *work)
 	case PHY_PENDING:
 		break;
 	case PHY_UP:
-		needs_aneg = 1;
+		needs_aneg = true;
 
 		phydev->link_timeout = PHY_AN_TIMEOUT;
 
@@ -757,7 +757,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->adjust_link(phydev->attached_dev);
 
 		} else if (0 == phydev->link_timeout--)
-			needs_aneg = 1;
+			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
 		err = phy_read_status(phydev);
@@ -791,7 +791,7 @@ void phy_state_machine(struct work_struct *work)
 			netif_carrier_on(phydev->attached_dev);
 		} else {
 			if (0 == phydev->link_timeout--)
-				needs_aneg = 1;
+				needs_aneg = true;
 		}
 
 		phydev->adjust_link(phydev->attached_dev);
@@ -827,7 +827,7 @@ void phy_state_machine(struct work_struct *work)
 			phydev->link = 0;
 			netif_carrier_off(phydev->attached_dev);
 			phydev->adjust_link(phydev->attached_dev);
-			do_suspend = 1;
+			do_suspend = true;
 		}
 		break;
 	case PHY_RESUMING:
@@ -876,6 +876,7 @@ void phy_state_machine(struct work_struct *work)
 			}
 			phydev->adjust_link(phydev->attached_dev);
 		}
+		do_resume = true;
 		break;
 	}
 
@@ -883,9 +884,10 @@ void phy_state_machine(struct work_struct *work)
 
 	if (needs_aneg)
 		err = phy_start_aneg(phydev);
-
-	if (do_suspend)
+	else if (do_suspend)
 		phy_suspend(phydev);
+	else if (do_resume)
+		phy_resume(phydev);
 
 	if (err < 0)
 		phy_error(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0ce606624296..4987a1c6dc52 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -614,8 +614,8 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 	err = phy_init_hw(phydev);
 	if (err)
 		phy_detach(phydev);
-
-	phy_resume(phydev);
+	else
+		phy_resume(phydev);
 
 	return err;
 }
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c9f3281506af..2e025ddcef21 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -120,6 +120,16 @@ static void cdc_mbim_unbind(struct usbnet *dev, struct usb_interface *intf)
 	cdc_ncm_unbind(dev, intf);
 }
 
+/* verify that the ethernet protocol is IPv4 or IPv6 */
+static bool is_ip_proto(__be16 proto)
+{
+	switch (proto) {
+	case htons(ETH_P_IP):
+	case htons(ETH_P_IPV6):
+		return true;
+	}
+	return false;
+}
 
 static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
 {
@@ -128,6 +138,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 	struct cdc_ncm_ctx *ctx = info->ctx;
 	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
 	u16 tci = 0;
+	bool is_ip;
 	u8 *c;
 
 	if (!ctx)
@@ -137,25 +148,32 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 		if (skb->len <= ETH_HLEN)
 			goto error;
 
+		/* Some applications using e.g. packet sockets will
+		 * bypass the VLAN acceleration and create tagged
+		 * ethernet frames directly.  We primarily look for
+		 * the accelerated out-of-band tag, but fall back if
+		 * required
+		 */
+		skb_reset_mac_header(skb);
+		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
+		    __vlan_get_tag(skb, &tci) == 0) {
+			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
+			skb_pull(skb, VLAN_ETH_HLEN);
+		} else {
+			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
+			skb_pull(skb, ETH_HLEN);
+		}
+
 		/* mapping VLANs to MBIM sessions:
 		 *   no tag     => IPS session <0>
 		 *   1 - 255    => IPS session <vlanid>
 		 *   256 - 511  => DSS session <vlanid - 256>
 		 *   512 - 4095 => unsupported, drop
 		 */
-		vlan_get_tag(skb, &tci);
-
 		switch (tci & 0x0f00) {
 		case 0x0000: /* VLAN ID 0 - 255 */
-			/* verify that datagram is IPv4 or IPv6 */
-			skb_reset_mac_header(skb);
-			switch (eth_hdr(skb)->h_proto) {
-			case htons(ETH_P_IP):
-			case htons(ETH_P_IPV6):
-				break;
-			default:
+			if (!is_ip)
 				goto error;
-			}
 			c = (u8 *)&sign;
 			c[3] = tci;
 			break;
@@ -169,7 +187,6 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
 				   "unsupported tci=0x%04x\n", tci);
 			goto error;
 		}
-		skb_pull(skb, ETH_HLEN);
 	}
 
 	spin_lock_bh(&ctx->mtx);
@@ -204,17 +221,23 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 		return;
 
 	/* need to send the NA on the VLAN dev, if any */
-	if (tci)
+	rcu_read_lock();
+	if (tci) {
 		netdev = __vlan_find_dev_deep(dev->net, htons(ETH_P_8021Q),
 					      tci);
-	else
+		if (!netdev) {
+			rcu_read_unlock();
+			return;
+		}
+	} else {
 		netdev = dev->net;
-	if (!netdev)
-		return;
+	}
+	dev_hold(netdev);
+	rcu_read_unlock();
 
 	in6_dev = in6_dev_get(netdev);
 	if (!in6_dev)
-		return;
+		goto out;
 	is_router = !!in6_dev->cnf.forwarding;
 	in6_dev_put(in6_dev);
 
@@ -224,6 +247,8 @@ static void do_neigh_solicit(struct usbnet *dev, u8 *buf, u16 tci)
 			 true /* solicited */,
 			 false /* override */,
 			 true /* inc_opt */);
+out:
+	dev_put(netdev);
 }
 
 static bool is_neigh_solicit(u8 *buf, size_t len)
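In do_neigh_solicit() above, the VLAN device lookup is only valid under rcu_read_lock(), while the neighbour advertisement that follows may sleep, so the fix pins the device with dev_hold() before leaving the RCU section and releases it with dev_put() on the new out: path. The general lookup/hold/use shape, sketched outside this driver (function name hypothetical):

	#include <linux/netdevice.h>
	#include <linux/if_vlan.h>
	#include <linux/rcupdate.h>

	static void use_vlan_dev(struct net_device *lower, u16 tci)
	{
		struct net_device *dev;

		rcu_read_lock();
		dev = __vlan_find_dev_deep(lower, htons(ETH_P_8021Q), tci);
		if (dev)
			dev_hold(dev);	/* pin before leaving the RCU section */
		rcu_read_unlock();

		if (!dev)
			return;

		/* ... may sleep here; dev cannot be freed under us ... */

		dev_put(dev);
	}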
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index f46cd0250e48..5627917c5ff7 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -95,8 +95,10 @@ static void ath9k_htc_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
 
 	if ((vif->type == NL80211_IFTYPE_AP ||
 	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
-	    bss_conf->enable_beacon)
+	    bss_conf->enable_beacon) {
 		priv->reconfig_beacon = true;
+		priv->rearm_ani = true;
+	}
 
 	if (bss_conf->assoc) {
 		priv->rearm_ani = true;
@@ -257,6 +259,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 
 	ath9k_htc_ps_wakeup(priv);
 
+	ath9k_htc_stop_ani(priv);
 	del_timer_sync(&priv->tx.cleanup_timer);
 	ath9k_htc_tx_drain(priv);
 
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index afb3d15e38ff..be1985296bdc 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4948,7 +4948,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_if *ifp)
 	if (!err) {
 		/* only set 2G bandwidth using bw_cap command */
 		band_bwcap.band = cpu_to_le32(WLC_BAND_2G);
-		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_40MHZ_BIT);
+		band_bwcap.bw_cap = cpu_to_le32(WLC_BW_CAP_40MHZ);
 		err = brcmf_fil_iovar_data_set(ifp, "bw_cap", &band_bwcap,
 					       sizeof(band_bwcap));
 	} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index fa858d548d13..0489314425cb 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -611,14 +611,14 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
 
 	if (IWL_MVM_BT_COEX_CORUNNING) {
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_CORUN_LUT_20 |
-						    BT_VALID_CORUN_LUT_40);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
+						     BT_VALID_CORUN_LUT_40);
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
 	}
 
 	if (IWL_MVM_BT_COEX_MPLUT) {
 		bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
-		bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
+		bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
 	}
 
 	if (mvm->cfg->bt_shared_single_ant)
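The coex.c fix is a classic read-modify-write bug: two independently enabled features both assigned valid_bit_msk with '=', so whichever ran second silently discarded the first one's bits; accumulating with '|=' preserves both. In miniature (flag names hypothetical):

	u32 valid_msk = 0;

	if (corunning_enabled)
		valid_msk |= BIT(0);	/* was 'valid_msk = BIT(0);' */
	if (mplut_enabled)
		valid_msk |= BIT(1);	/* '=' here would erase BIT(0) */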
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 9426905de6b2..d73a89ecd78a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -183,9 +183,9 @@ enum iwl_scan_type {
  *	this number of packets were received (typically 1)
  * @passive2active: is auto switching from passive to active during scan allowed
  * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
+ * @max_out_time: in TUs, max out of serving channel time
  * @suspend_time: how long to pause scan when returning to service channel:
- *	bits 0-19: beacon interal in usecs (suspend before executing)
+ *	bits 0-19: beacon interal in TUs (suspend before executing)
  *	bits 20-23: reserved
  *	bits 24-31: number of beacons (suspend between channels)
  * @rxon_flags: RXON_FLG_*
@@ -383,8 +383,8 @@ enum scan_framework_client {
 * @quiet_plcp_th: quiet channel num of packets threshold
 * @good_CRC_th: passive to active promotion threshold
 * @rx_chain: RXON rx chain.
- * @max_out_time: max uSec to be out of assoceated channel
- * @suspend_time: pause scan this long when returning to service channel
+ * @max_out_time: max TUs to be out of assoceated channel
+ * @suspend_time: pause scan this TUs when returning to service channel
 * @flags: RXON flags
 * @filter_flags: RXONfilter
 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f0cebf12c7b8..b41dc84e9431 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1007,7 +1007,7 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, len, cmd);
+	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1023,7 +1023,7 @@ static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
 	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
 		return;
 
-	ieee80211_iterate_active_interfaces(
+	ieee80211_iterate_active_interfaces_atomic(
 		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
 		iwl_mvm_mc_iface_iterator, &iter_data);
 }
@@ -1807,6 +1807,11 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
 
 	mutex_lock(&mvm->mutex);
 
+	if (!iwl_mvm_is_idle(mvm)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
 	switch (mvm->scan_status) {
 	case IWL_MVM_SCAN_OS:
 		IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
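The two mac80211.c changes above belong together: ieee80211_iterate_active_interfaces_atomic() invokes its callback under RCU, i.e. in atomic context, so the callback may no longer block waiting for a synchronous firmware response, and the multicast filter command is switched from CMD_SYNC to CMD_ASYNC accordingly. A sketch of the constraint (callback name hypothetical, body reduced to a comment):

	static void my_mc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
	{
		/* Called under rcu_read_lock(): sleeping is forbidden, so
		 * any firmware command issued from here must be
		 * fire-and-forget (CMD_ASYNC), never a blocking CMD_SYNC. */
	}

	ieee80211_iterate_active_interfaces_atomic(hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   my_mc_iter, NULL);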
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d564233a65da..f1ec0986c3c9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -1003,6 +1003,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
 	return mvmvif->low_latency;
 }
 
+/* Assoc status */
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm);
+
 /* Thermal management and CT-kill */
 void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
 void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 9f52c5b3f0ec..e1c838899363 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -1010,7 +1010,7 @@ static void rs_tx_status(void *mvm_r, struct ieee80211_supported_band *sband,
 		return;
 	}
 
-#ifdef CPTCFG_MAC80211_DEBUGFS
+#ifdef CONFIG_MAC80211_DEBUGFS
 	/* Disable last tx check if we are debugging with fixed rate */
 	if (lq_sta->dbg_fixed_rate) {
 		IWL_DEBUG_RATE(mvm, "Fixed rate. avoid rate scaling\n");
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index c91dc8498852..c28de54c75d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -277,51 +277,22 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
 					    IEEE80211_IFACE_ITER_NORMAL,
 					    iwl_mvm_scan_condition_iterator,
 					    &global_bound);
-	/*
-	 * Under low latency traffic passive scan is fragmented meaning
-	 * that dwell on a particular channel will be fragmented. Each fragment
-	 * dwell time is 20ms and fragments period is 105ms. Skipping to next
-	 * channel will be delayed by the same period - 105ms. So suspend_time
-	 * parameter describing both fragments and channels skipping periods is
-	 * set to 105ms. This value is chosen so that overall passive scan
-	 * duration will not be too long. Max_out_time in this case is set to
-	 * 70ms, so for active scanning operating channel will be left for 70ms
-	 * while for passive still for 20ms (fragment dwell).
-	 */
-	if (global_bound) {
-		if (!iwl_mvm_low_latency(mvm)) {
-			params->suspend_time = ieee80211_tu_to_usec(100);
-			params->max_out_time = ieee80211_tu_to_usec(600);
-		} else {
-			params->suspend_time = ieee80211_tu_to_usec(105);
-			/* P2P doesn't support fragmented passive scan, so
-			 * configure max_out_time to be at least longest dwell
-			 * time for passive scan.
-			 */
-			if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
-				params->max_out_time = ieee80211_tu_to_usec(70);
-				params->passive_fragmented = true;
-			} else {
-				u32 passive_dwell;
-
-				/*
-				 * Use band G so that passive channel dwell time
-				 * will be assigned with maximum value.
-				 */
-				band = IEEE80211_BAND_2GHZ;
-				passive_dwell = iwl_mvm_get_passive_dwell(band);
-				params->max_out_time =
-					ieee80211_tu_to_usec(passive_dwell);
-			}
-		}
+	if (!global_bound)
+		goto not_bound;
+
+	params->suspend_time = 100;
+	params->max_out_time = 600;
+
+	if (iwl_mvm_low_latency(mvm)) {
+		params->suspend_time = 250;
+		params->max_out_time = 250;
 	}
 
+not_bound:
+
 	for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
-		if (params->passive_fragmented)
-			params->dwell[band].passive = 20;
-		else
-			params->dwell[band].passive =
-				iwl_mvm_get_passive_dwell(band);
+		params->dwell[band].passive = iwl_mvm_get_passive_dwell(band);
 		params->dwell[band].active = iwl_mvm_get_active_dwell(band,
 								      n_ssids);
 	}
@@ -761,7 +732,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
 	int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
 	int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
 	int head = 0;
-	int tail = band_2ghz + band_5ghz;
+	int tail = band_2ghz + band_5ghz - 1;
 	u32 ssid_bitmap;
 	int cmd_len;
 	int ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index d619851745a1..2180902266ae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -644,3 +644,22 @@ bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
 
 	return result;
 }
+
+static void iwl_mvm_idle_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	bool *idle = _data;
+
+	if (!vif->bss_conf.idle)
+		*idle = false;
+}
+
+bool iwl_mvm_is_idle(struct iwl_mvm *mvm)
+{
+	bool idle = true;
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_idle_iter, &idle);
+
+	return idle;
+}
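iwl_mvm_is_idle() reduces a per-interface flag across every active vif using the same atomic iterator; iwl_mvm_mac_sched_scan_start() earlier in this patch uses the result to refuse a scheduled scan while any interface is busy. The reduce-over-interfaces idiom, sketched with a different (hypothetical) predicate:

	/* Reduce a per-interface predicate over all active vifs. */
	static void any_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
	{
		bool *any = data;

		if (vif->bss_conf.assoc)
			*any = true;
	}

	static bool any_vif_assoc(struct ieee80211_hw *hw)
	{
		bool any = false;

		ieee80211_iterate_active_interfaces_atomic(hw,
						IEEE80211_IFACE_ITER_NORMAL,
						any_assoc_iter, &any);
		return any;
	}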
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dcfd6d866d09..2365553f1ef7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1749,6 +1749,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	 * PCI Tx retries from interfering with C3 CPU state */
 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
 
+	trans->dev = &pdev->dev;
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
 	err = pci_enable_msi(pdev);
 	if (err) {
 		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
@@ -1760,8 +1764,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		}
 	}
 
-	trans->dev = &pdev->dev;
-	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
@@ -1787,8 +1789,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_pci_disable_msi;
 	}
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
 	if (iwl_pcie_alloc_ict(trans))
 		goto out_free_cmd_pool;
 
@@ -1800,6 +1800,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 		goto out_free_ict;
 	}
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
 	return trans;
 
 out_free_ict:
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 630a3fcf65bc..0d4a285cbd7e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -226,7 +226,7 @@ int xenvif_map_frontend_rings(struct xenvif *vif,
 			      grant_ref_t rx_ring_ref);
 
 /* Check for SKBs from frontend and schedule backend processing */
-void xenvif_check_rx_xenvif(struct xenvif *vif);
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif);
 
 /* Prevent the device from generating any further traffic. */
 void xenvif_carrier_off(struct xenvif *vif);
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index ef05c5c49d41..20e9defa1060 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -75,32 +75,8 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
 	work_done = xenvif_tx_action(vif, budget);
 
 	if (work_done < budget) {
-		int more_to_do = 0;
-		unsigned long flags;
-
-		/* It is necessary to disable IRQ before calling
-		 * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
-		 * lose event from the frontend.
-		 *
-		 * Consider:
-		 *   RING_HAS_UNCONSUMED_REQUESTS
-		 *   <frontend generates event to trigger napi_schedule>
-		 *   __napi_complete
-		 *
-		 * This handler is still in scheduled state so the
-		 * event has no effect at all. After __napi_complete
-		 * this handler is descheduled and cannot get
-		 * scheduled again. We lose event in this case and the ring
-		 * will be completely stalled.
-		 */
-
-		local_irq_save(flags);
-
-		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
-		if (!more_to_do)
-			__napi_complete(napi);
-
-		local_irq_restore(flags);
+		napi_complete(napi);
+		xenvif_napi_schedule_or_enable_events(vif);
 	}
 
 	return work_done;
@@ -194,7 +170,7 @@ static void xenvif_up(struct xenvif *vif)
 	enable_irq(vif->tx_irq);
 	if (vif->tx_irq != vif->rx_irq)
 		enable_irq(vif->rx_irq);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_down(struct xenvif *vif)
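The xenvif_poll() rewrite drops the open-coded IRQ masking in favour of the standard NAPI completion pattern: complete NAPI first, then re-check the ring and reschedule if an event slipped into the window; that re-check now lives in xenvif_napi_schedule_or_enable_events(). The generic shape of the pattern (helper names hypothetical):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work_done = my_process_ring(napi, budget);

		if (work_done < budget) {
			napi_complete(napi);
			/* Window: an event may have arrived after the last
			 * ring check. Re-check and reschedule if needed. */
			if (my_ring_has_work(napi))
				napi_schedule(napi);
		}
		return work_done;
	}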
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 76665405c5aa..7367208ee8cd 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -104,7 +104,7 @@ static inline unsigned long idx_to_kaddr(struct xenvif *vif,
 
 /* Find the containing VIF's structure from a pointer in pending_tx_info array
  */
-static inline struct xenvif* ubuf_to_vif(struct ubuf_info *ubuf)
+static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
 {
 	u16 pending_idx = ubuf->desc;
 	struct pending_tx_info *temp =
@@ -323,6 +323,35 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
 }
 
 /*
+ * Find the grant ref for a given frag in a chain of struct ubuf_info's
+ * skb: the skb itself
+ * i: the frag's number
+ * ubuf: a pointer to an element in the chain. It should not be NULL
+ *
+ * Returns a pointer to the element in the chain where the page were found. If
+ * not found, returns NULL.
+ * See the definition of callback_struct in common.h for more details about
+ * the chain.
+ */
+static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
+						const int i,
+						const struct ubuf_info *ubuf)
+{
+	struct xenvif *foreign_vif = ubuf_to_vif(ubuf);
+
+	do {
+		u16 pending_idx = ubuf->desc;
+
+		if (skb_shinfo(skb)->frags[i].page.p ==
+		    foreign_vif->mmap_pages[pending_idx])
+			break;
+		ubuf = (struct ubuf_info *) ubuf->ctx;
+	} while (ubuf);
+
+	return ubuf;
+}
+
+/*
  * Prepare an SKB to be transmitted to the frontend.
  *
  * This function is responsible for allocating grant operations, meta
@@ -346,9 +375,8 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	int head = 1;
 	int old_meta_prod;
 	int gso_type;
-	struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
-	grant_ref_t foreign_grefs[MAX_SKB_FRAGS];
-	struct xenvif *foreign_vif = NULL;
+	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
+	const struct ubuf_info *const head_ubuf = ubuf;
 
 	old_meta_prod = npo->meta_prod;
 
@@ -386,19 +414,6 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 		npo->copy_off = 0;
 		npo->copy_gref = req->gref;
 
-	if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
-		 (ubuf->callback == &xenvif_zerocopy_callback)) {
-		int i = 0;
-		foreign_vif = ubuf_to_vif(ubuf);
-
-		do {
-			u16 pending_idx = ubuf->desc;
-			foreign_grefs[i++] =
-				foreign_vif->pending_tx_info[pending_idx].req.gref;
-			ubuf = (struct ubuf_info *) ubuf->ctx;
-		} while (ubuf);
-	}
-
 	data = skb->data;
 	while (data < skb_tail_pointer(skb)) {
 		unsigned int offset = offset_in_page(data);
@@ -415,13 +430,60 @@ static int xenvif_gop_skb(struct sk_buff *skb,
 	}
 
 	for (i = 0; i < nr_frags; i++) {
+		/* This variable also signals whether foreign_gref has a real
+		 * value or not.
+		 */
+		struct xenvif *foreign_vif = NULL;
+		grant_ref_t foreign_gref;
+
+		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
+		    (ubuf->callback == &xenvif_zerocopy_callback)) {
+			const struct ubuf_info *const startpoint = ubuf;
+
+			/* Ideally ubuf points to the chain element which
+			 * belongs to this frag. Or if frags were removed from
+			 * the beginning, then shortly before it.
+			 */
+			ubuf = xenvif_find_gref(skb, i, ubuf);
+
+			/* Try again from the beginning of the list, if we
+			 * haven't tried from there. This only makes sense in
+			 * the unlikely event of reordering the original frags.
+			 * For injected local pages it's an unnecessary second
+			 * run.
+			 */
+			if (unlikely(!ubuf) && startpoint != head_ubuf)
+				ubuf = xenvif_find_gref(skb, i, head_ubuf);
+
+			if (likely(ubuf)) {
+				u16 pending_idx = ubuf->desc;
+
+				foreign_vif = ubuf_to_vif(ubuf);
+				foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
+				/* Just a safety measure. If this was the last
+				 * element on the list, the for loop will
+				 * iterate again if a local page were added to
+				 * the end. Using head_ubuf here prevents the
+				 * second search on the chain. Or the original
+				 * frags changed order, but that's less likely.
+				 * In any way, ubuf shouldn't be NULL.
+				 */
+				ubuf = ubuf->ctx ?
+					(struct ubuf_info *) ubuf->ctx :
+					head_ubuf;
+			} else
+				/* This frag was a local page, added to the
+				 * array after the skb left netback.
+				 */
+				ubuf = head_ubuf;
+		}
 		xenvif_gop_frag_copy(vif, skb, npo,
 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
 				     skb_shinfo(skb)->frags[i].page_offset,
 				     &head, foreign_vif,
-				     foreign_grefs[i]);
+				     foreign_vif ? foreign_gref : UINT_MAX);
 	}
 
 	return npo->meta_prod - old_meta_prod;
@@ -654,7 +716,7 @@ done:
 		notify_remote_via_irq(vif->rx_irq);
 }
 
-void xenvif_check_rx_xenvif(struct xenvif *vif)
+void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
 {
 	int more_to_do;
 
@@ -688,7 +750,7 @@ static void tx_credit_callback(unsigned long data)
 {
 	struct xenvif *vif = (struct xenvif *)data;
 	tx_add_credit(vif);
-	xenvif_check_rx_xenvif(vif);
+	xenvif_napi_schedule_or_enable_events(vif);
 }
 
 static void xenvif_tx_err(struct xenvif *vif,
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6963bdf54175..6aea373547f6 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -6,6 +6,7 @@ menu "PTP clock support"
 
 config PTP_1588_CLOCK
 	tristate "PTP clock support"
+	depends on NET
 	select PPS
 	select NET_PTP_CLASSIFY
 	help
@@ -74,7 +75,7 @@ config DP83640_PHY
 config PTP_1588_CLOCK_PCH
 	tristate "Intel PCH EG20T as PTP clock"
 	depends on X86 || COMPILE_TEST
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && NET
 	select PTP_1588_CLOCK
 	help
 	  This driver adds support for using the PCH EG20T as a PTP
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 1b681427dde0..c341f855fadc 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1621,8 +1621,6 @@ void sas_rphy_free(struct sas_rphy *rphy)
 	list_del(&rphy->list);
 	mutex_unlock(&sas_host->lock);
 
-	sas_bsg_remove(shost, rphy);
-
 	transport_destroy_device(dev);
 
 	put_device(dev);
@@ -1681,6 +1679,7 @@ sas_rphy_remove(struct sas_rphy *rphy)
 	}
 
 	sas_rphy_unlink(rphy);
+	sas_bsg_remove(NULL, rphy);
 	transport_remove_device(dev);
 	device_del(dev);
 }